query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | negatives (sequence, 19 to 20 items) | metadata (dict) |
---|---|---|---|
Write the concordance entries to the output file (filename). See sample output files for format. | def write_concordance(self, filename):
all_keys = self.concordance_table.get_all_keys()
lines = []
for i in all_keys:
a = ""
a += i + ":"
f = self.concordance_table.get_value(i)
if f != None:
for s in f:
a += " " + str(s)
a += "\n"
lines.append(a)
a = open(filename, "w+")
for i in lines:
a.write(i)
a.close() | [
"def write_concordance(self, filename):\r\n key_list = self.concordance_table.get_all_keys()\r\n key_list.sort()\r\n write_text = ''\r\n for x in range(0,len(key_list)):\r\n values = self.concordance_table.get_value(key_list[x])\r\n values_str = ''\r\n for y in range(0, len(values)):\r\n values_str += str(values[y]) + ' '\r\n write_text += key_list[x] + ': ' + values_str[:len(values_str) - 1] + '\\n'\r\n write_text = write_text[:len(write_text) - 1]\r\n write_file = open(filename, 'w')\r\n write_file.write(write_text)\r\n write_file.close()",
"def write_concordance(self, filename):\n out = ''\n values = [x for x in self.concordance_table.hash_table if x is not None]\n values.sort(key=lambda x: x[0])\n for v in values:\n out += f'{v[0]}: {\" \".join(str(x) for x in sorted(set(v[1])))}\\n' \n with open(filename, 'w') as f:\n f.write(out.rstrip())",
"def write_concordance(self, filename):\n file = open(filename, \"w\")\n keys = self.concordance_table.get_all_keys()\n keys.sort()\n for i in range(len(keys)):\n index = self.concordance_table.get_index(keys[i])\n key = self.concordance_table.hash_table[index][0]\n values = self.concordance_table.hash_table[index][1]\n string_of_values = \"\"\n for j in values:\n string_of_values += \" \" + str(j)\n\n content = \"{0}:{1}\".format(key, string_of_values)\n\n if i == len(keys) - 1:\n file.write(content)\n else:\n file.write(content + \"\\n\")\n\n file.close()",
"def write_cando_file(self, file_name):\n cando_writer = CandoWriter(self.dna_structure)\n cando_writer.write(file_name)",
"def conllWrite(outputPath, sentences, headers):\n if not os.path.exists(os.path.dirname(outputPath)):\n os.makedirs(os.path.dirname(outputPath))\n fOut = open(outputPath, 'w')\n\n for sentence in sentences:\n fOut.write(\"#\")\n fOut.write(\"\\t\".join(headers))\n fOut.write(\"\\n\")\n for tokenIdx in range(len(sentence[headers[0]])):\n aceData = [sentence[key][tokenIdx] for key in headers]\n fOut.write(\"\\t\".join(aceData))\n fOut.write(\"\\n\")\n fOut.write(\"\\n\")",
"def _write_conductances(self, cond_file_name):\n cond_file_path = os.path.join(OM_STORAGE_DIR, cond_file_name)\n\n #TODO: Check that the file doesn't already exist.\n LOG.info(\"Writing head conductance file: %s\" % cond_file_path)\n file_handle = file(cond_file_path, \"a\")\n\n file_handle.write(\"# Properties Description 1.0 (Conductivities)\\n\\n\")\n file_handle.write(\"Air %4.2f\\n\" % self.conductances[\"air\"])\n file_handle.write(\"Scalp %4.2f\\n\" % self.conductances[\"skin\"])\n file_handle.write(\"Brain %4.2f\\n\" % self.conductances[\"brain\"])\n file_handle.write(\"Skull %4.2f\\n\" % self.conductances[\"skull\"])\n\n file_handle.close()\n LOG.info(\"%s written successfully.\" % cond_file_path)\n\n return cond_file_path",
"def write_conll_to_file(self, file_path):\n self.conll_file.write_conll(file_path)",
"def write_CA_atoms():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n ca_list = []\n with open(filepath, 'r') as pdb:\n for line in pdb:\n if line[:4] == 'ATOM' and line[12:16] == \" CA \":\n line_split = line.split()[6:9]\n ca_list.append(line_split)\n choice1 = input('Enter name of the outfile: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in ca_list:\n outfile.writelines(i)\n print('Done!')\n print(i)",
"def write_to_fasta(self, output_file):\n fw = FastaWriter(output_file)\n for file_path, template, complement in self.results:\n if template:\n header = \"{0} {1}\".format(file_path, \"template\")\n fw.write_entry(header, template)\n if complement:\n header = \"{0} {1}\".format(file_path, \"complement\")\n fw.write_entry(header, complement)",
"def file_output(matches: list, output_file_name: str = 'matches.txt'):\n with open(\"test/Matches/\" + output_file_name, 'w') as f:\n for match in matches:\n for event in match.events:\n f.write(\"%s\\n\" % event.payload)\n f.write(\"\\n\")",
"def write(self, filename):\n with open(filename, 'w') as f:\n for entry in self.entrys:\n f.write(self._entry_to_string(entry) + '\\n')\n logging.info('Wrote {0} entrys to file {1}'.format(\n len(self.entrys), filename))",
"def write_antecedent_decisions_to_file(self, file):\n for document in self.documents:\n document.write_antecedent_decisions_to_file(file)",
"def write_to_file(self):\n self._file_writer.write(self._reconstructed_sentences)",
"def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()",
"def write_corpus_to_file(output_file, corpus): \n \n file = open(output_file, 'w')\n for line in corpus: \n file.write(line)\n print ('Corpus has been writted in file')\n file.close()",
"def write_file(self, filename):\n\n with open(filename, 'w', newline = '') as csvfile:\n langwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for key in self.features:\n value = self.features[key]\n l = []\n for val in value:\n l.append(str(val))\n langwriter.writerow([l])\n return",
"def write_cn_cards(bc_file, bc_class):\n cn = bc_class.constituent_properties\n bc_file.write('! Constituent Properties\\n')\n if not cn.general_constituents.empty:\n # bc_file.write(cn.general_constituents.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.general_constituents.iterrows():\n bc_file.write(\n 'CN CON {} {}\\n'.format(row['ID'].astype('int'), row['CONC']))\n if not cn.sand.empty:\n # bc_file.write(cn.sand.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.sand.iterrows():\n bc_file.write(\n 'CN SND {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if not cn.clay.empty:\n # bc_file.write(cn.clay.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.clay.iterrows():\n bc_file.write(\n 'CN CLA {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if cn.salinity:\n bc_file.write('CN SAL {} {}\\n'.format(cn.salinity_id, cn.reference_concentration))\n if cn.temperature:\n bc_file.write('CN TMP {} {}\\n'.format(cn.temperature_id, cn.reference_temperature))\n if cn.vorticity:\n bc_file.write('CN VOR {} {} {} {}\\n'.format(cn.vorticity_id, cn.vorticity_normalization,\n cn.vorticity_as_term, cn.vorticity_ds_term))\n\n bc_file.write('\\n') # blank line at the end of the Constituent Properties",
"def write_file(self):\n with open(self._file_name, 'w') as output_file:\n output_file.writelines([line + '\\n' for line in self._lines])",
"def write_conformers(self, filename): # ccids):\n cnt = 0\n for confId in range(self.nconf): #ccids:\n w = Chem.SDWriter('%s_c%03d.sdf'%(filename,cnt+1))\n w.write(self.mol, confId=confId)\n w.flush()\n w.close()\n cnt += 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds a k-factor circulant matrix (a matrix with the structure of circulant matrices, but with the entries above the diagonal multiplied by the same factor). The matrix is stored in memory. | def factor_circulant_matrix(x, k):
n=len(x)
return circulant(x) * (tri(n,n, 0) + k*np.transpose(tri(n,n, -1))) | [
"def generate_k_circulant(n: int, k: int):\n return nx.to_numpy_matrix(\n nx.generators.classic.circulant_graph(n, list(range(1, k + 1))),\n dtype=np.int64,\n )",
"def _calc_k_matrix(self):\n el_len = self.coord_electrode.size\n h = float(np.diff(self.coord_electrode).min())\n\n c_jm1 = np.eye(el_len + 2, k=0) / h\n c_jm1[0, 0] = 0\n\n c_j0 = np.eye(el_len + 2) / h\n c_j0[-1, -1] = 0\n\n c_jall = c_j0\n c_jall[0, 0] = 1\n c_jall[-1, -1] = 1\n\n tjp1 = np.eye(el_len + 2, k=1)\n tjm1 = np.eye(el_len + 2, k=-1)\n\n tj0 = np.eye(el_len + 2)\n tj0[0, 0] = 0\n tj0[-1, -1] = 0\n\n # Defining K-matrix used to calculate e_mat1-3\n return np.dot(np.linalg.inv(np.dot(c_jm1, tjm1) +\n 2 * np.dot(c_jm1, tj0) +\n 2 * c_jall +\n np.dot(c_j0, tjp1)),\n 3 * (np.dot(np.dot(c_jm1, c_jm1), tj0) -\n np.dot(np.dot(c_jm1, c_jm1), tjm1) +\n np.dot(np.dot(c_j0, c_j0), tjp1) -\n np.dot(np.dot(c_j0, c_j0), tj0)))",
"def form_factor_k_k(self, q):\n return self._ff_k_k.form_factor(q=q, couplings=self._couplings)",
"def _K(m):\n M = m*(m - 1)/2\n K = np.zeros((M, m**2), dtype=np.int64)\n row = 0\n for j in range(1, m):\n col = (j - 1)*m + j\n s = m - j\n K[row:(row+s), col:(col+s)] = np.eye(s)\n row += s\n return K",
"def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue",
"def ckm(i,j):\n if i >= 1 and i <= 3 and j >= 1 and j <= 3:\n return _ckm_abs[i-1, j-1]\n else:\n raise(ValueError('Wrong generation index in CKM matrix: ({},{}).'.format(i,j)))",
"def factor_circulant_multiplication(u, x, k=1):\n n = len(u) \n D_k = (k**(1/n))**np.arange(0,n)\n Lambda = fft(D_k*x)\n return (1/D_k)*real(ifft(Lambda*fft(D_k*u))) # y",
"def clement(n, k=0):\n z = np.arange(1, n)\n\n if k == 0:\n a = np.diag(z[::-1], -1) + np.diag(z, 1)\n else:\n y = np.sqrt(z[::-1] * z)\n a = np.diag(y, -1) + np.diag(y, 1)\n\n return a",
"def _diagk(X, k):\n X = np.asanyarray(X)\n s = X.shape\n if len(s) > 1:\n D = np.diag(X, k)\n else:\n D = np.array([])\n\n return D",
"def build_k_by_k_matrix(k1, k2, n_clusters):\r\n\r\n m = np.zeros((n_clusters, n_clusters))\r\n for class1, class2 in zip(k1, k2):\r\n m[class1][class2] += 1\r\n print(m)\r\n return m",
"def create_matrix(lengths, covs, kmers):\n\n C = _create_and_normalize_covs(covs, lengths)\n K = _create_and_normalize_kmers(kmers)\n\n return np.hstack((K,C))",
"def kronecker_graph(g, k, add_self_edges=True, strip_self_edges=True):\n\n adj = nx.adjacency_matrix(g).todense()\n if add_self_edges:\n for i in range(len(adj)):\n adj[i, i] = 1\n mat = adj\n for i in range(k - 1):\n mat = np.kron(mat, adj)\n if strip_self_edges:\n for i in range(len(mat)):\n mat[i, i] = 0\n name = \"kronecker(%s, %s, %s, %s)\" % (\n g.name if g.name else hash(g), k, add_self_edges, strip_self_edges)\n return nx.Graph(mat, name=name)",
"def dimension_cusp_forms(self, k, eps=0):\n if eps == 0:\n eps = self._sgn\n return self._weil_module.dimension_cusp_forms(k, eps)",
"def nCkarray(*k_values):\n result = 1\n for i, j in enumerate((m for k in k_values for m in range(1, k+1)), 1):\n result = (result * i) // j\n return result",
"def cdf(self, k):\n\n if k < 0 or k > self.n:\n return 0\n\n k = int(k)\n ans = 0\n for i in range(0, k + 1):\n ans += self.pmf(i)\n return ans",
"def matrices(self):\n # Creating L\n L = scipy.sparse.diags((self.inv_dx2, -2*self.inv_dx2, self.inv_dx2, 1),\n (-(self.N+1), -self.N, -(self.N-1), self.N),\n shape=(2*self.N, 2*self.N), dtype=np.complex128)\n self.L = scipy.sparse.csr_matrix(L)\n self.L[-(self.N+1), 0], self.L[-1, -self.N] = 0, 0\n\n # Computing largest eigenvalue of L explicitely:\n self.mu_max = self.inv_dx*np.sqrt(2*(1 + np.cos(np.pi/(self.N+1))))\n\n # Creating K\n self.K = scipy.sparse.diags((-self.inv_dx2, 2*self.inv_dx2, -self.inv_dx2),\n (-1, 0, 1), # Diagonals\n shape=(self.N, self.N), # Size of matrix\n dtype=np.complex128)",
"def CartanMatrix(ct):\n\n return CartanType(ct).cartan_matrix()",
"def kronecker(self, value):\n if not (type(self) == type(value)):\n raise TypeError(\"Inappropriate argument type for kronecker product\")\n returnvalue = Matrix()\n for i in range(self._height):\n for j in range(value._height):\n newRow = list()\n for k in range(self._width):\n for l in range(value._width):\n newRow.append(self[i][k] * value[j][l])\n returnvalue.addRow(*newRow)\n return returnvalue",
"def __factor_matrix(self, R, K, alpha, steps, beta, error_limit):\n # Transform regular array to numpy array\n R = numpy.array(R)\n\n # Generate P - N x K\n # Use random values to start. Best performance\n N = len(R)\n M = len(R[0])\n P = numpy.random.rand(N, K)\n\n # Generate Q - M x K\n # Use random values to start. Best performance\n Q = numpy.random.rand(M, K)\n Q = Q.T\n\n error = 0\n\n # iterate through max # of steps\n for step in xrange(steps):\n\n # iterate each cell in r\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # get the eij (error) side of the equation\n eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])\n\n for k in xrange(K):\n # (*update_rule) update pik_hat\n P[i][k] = P[i][k] + alpha * (2 * eij * Q[k][j] - beta * P[i][k])\n\n # (*update_rule) update qkj_hat\n Q[k][j] = Q[k][j] + alpha * ( 2 * eij * P[i][k] - beta * Q[k][j] )\n\n # Measure error\n error = self.__error(R, P, Q, K, beta)\n\n # Terminate when we converge\n if error < error_limit:\n break\n\n # track Q, P (learned params)\n # Q = Products x feature strength\n # P = Users x feature strength\n self.Q = Q.T\n self.P = P\n\n self.__print_fit_stats(error, N, M)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
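A minimal usage sketch for the factor_circulant_matrix document above. It assumes `import numpy as np` and `from scipy.linalg import circulant` (the row calls `circulant` and `tri` unqualified, presumably via star-imports); the example values are hypothetical and only illustrate the claimed structure: entries on and below the diagonal equal those of the ordinary circulant matrix, while entries above it are scaled by k.

```python
import numpy as np
from scipy.linalg import circulant

x = np.array([1.0, 2.0, 3.0, 4.0])   # first column of the matrix (hypothetical values)
k = 5.0
n = len(x)

C = circulant(x)                                      # ordinary circulant matrix
C_k = C * (np.tri(n, n, 0) + k * np.tri(n, n, -1).T)  # same recipe as the document above

# On/below the diagonal the entries are unchanged; strictly above they are scaled by k.
assert np.allclose(np.tril(C_k), np.tril(C))
assert np.allclose(np.triu(C_k, 1), k * np.triu(C, 1))
```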
Compute the matrix-vector product y = Cu, where C is a k-factor circulant matrix. All matrices are real. | def factor_circulant_multiplication(u, x, k=1):
n = len(u)
D_k = (k**(1/n))**np.arange(0,n)
Lambda = fft(D_k*x)
return (1/D_k)*real(ifft(Lambda*fft(D_k*u))) # y | [
"def scalarMultiplication(self,c):\n matrixResult = [[complex.ComplexNumber(0,0) for x in range(self.m)] for y in range(self.n)] \n for i in range (self.m):\n for j in range (self.n):\n matrixResult[i][j]=self.mtx[i][j].multiplication(c)\n matResult = Matrix(matrixResult)\n return matResult",
"def updateC(A, U, B):\n \n m_dim = A.shape[1] \n q_dim = B.shape[0]\n \n C_tensor = np.zeros((m_dim, m_dim, q_dim), dtype=np.complex)\n \n for k in range(q_dim):\n A_k = A[:, :, k]\n b_k = B[k]\n \n x_hat = U @ b_k\n y_hat = A_k.conj().T @ x_hat\n \n phase_y = np.exp(1j*np.angle(y_hat))\n #phase_y = np.sign(y_hat)\n C_k = np.diag(phase_y)\n C_tensor[:, :, k] = C_k\n \n \n return C_tensor",
"def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product",
"def factor_circulant_matrix(x, k):\n n=len(x)\n return circulant(x) * (tri(n,n, 0) + k*np.transpose(tri(n,n, -1)))",
"def covar(fx,cx):\n \n fx = np.array(fx)\n cx = np.array(cx)\n \n shape_fx = fx.shape\n shape_cx = cx.shape\n \n \n if shape_fx[1] != shape_cx[0]:\n print('-----------------------------------------')\n print(\"Shapes of fx and cx cannot be multiplied:\")\n print(shape_fx,\"x\",shape_cx)\n print('-----------------------------------------')\n raise ValueError('Input matrices are not compliant')\n \n cy = np.dot(np.dot(fx,cx),fx.T)\n \n print(\"Size of Cy matrix: \",np.shape(cy))\n \n return cy",
"def matmult(*x):\n return reduce(np.dot, x)",
"def factors_multiple(self, U=None):\n factors = U.dot(self.C_)\n if self.C_bias_.shape[0]:\n factors[:] += self.C_bias_.reshape((1,-1))\n return factors",
"def calc_CCuij(U, V):\n ## FIXME: Check for non-positive Uij's, 2009-08-19\n invU = linalg.inverse(U)\n invV = linalg.inverse(V)\n #invU = internal_inv3x3(U)\n #invV = internal_inv3x3(V)\n \n det_invU = linalg.determinant(invU)\n det_invV = linalg.determinant(invV)\n\n return ( math.sqrt(math.sqrt(det_invU * det_invV)) /\n math.sqrt((1.0/8.0) * linalg.determinant(invU + invV)) )",
"def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue",
"def c_matrix(x1,x2,x3):\n\tC = np.array([\t[\t2*(x2-x1), \t\t(x2-x1), \t\t\t0\t\t\t], \\\n\t\t\t\t\t[\t(x2-x1), \t\t2*(x3-x1), \t\t(x3-x2)\t\t], \\\n\t\t\t\t\t[\t0,\t\t\t\t(x3-x2),\t\t2*(x3-x2)\t] \t], \\\n\t\t\t\t\tfloat)\n\treturn(C)",
"def cov(zc):\n s, NM = zc.shape\n C = np.dot(zc, zc.T.conj()) / NM\n\n return C.real # C should be real, as C = A A^T and A is real",
"def _calc_ucf_fuc(self):\n ucf_mat = self._ucf_mat\n fuc_mat = self._fuc_mat\n tmp1 = self._tmp1\n c_layers = self._c_layers\n f_layers = self._f_layers\n depth, n = self.num_cnots, self._num_qubits\n\n # tmp1 = U^dagger.\n np.conj(self.target_matrix.T, out=tmp1)\n\n # ucf_mat = fuc_mat = U^dagger @ C = U^dagger @ C_{depth-1} @ ... @ C_{0}.\n self._ucf_mat.set_matrix(tmp1)\n for q in range(depth - 1, -1, -1):\n ucf_mat.mul_right_q2(c_layers[q], temp_mat=tmp1, dagger=False)\n fuc_mat.set_matrix(ucf_mat.finalize(temp_mat=tmp1))\n\n # fuc_mat = F @ U^dagger @ C = F_{n-1} @ ... @ F_{0} @ U^dagger @ C.\n for q in range(n):\n fuc_mat.mul_left_q1(f_layers[q], temp_mat=tmp1)\n\n # ucf_mat = U^dagger @ C @ F = U^dagger @ C @ F_{n-1} @ ... @ F_{0}.\n for q in range(n - 1, -1, -1):\n ucf_mat.mul_right_q1(f_layers[q], temp_mat=tmp1, dagger=False)",
"def calculate_abc(self):\n (a,b,c, V) = crystal_calc.make_lattice_vectors(self.lattice_lengths, self.lattice_angles)\n #Now rotate all these vectors by the U matrix\n self.a = np.dot(self.u_matrix, a).reshape(1,3)\n self.b = np.dot(self.u_matrix, b).reshape(1,3)\n self.c = np.dot(self.u_matrix, c).reshape(1,3)",
"def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z",
"def compute_factors(self):\n self.O = np.zeros((self.u, self.k))\n self.A = np.zeros((self.v, self.k))\n self.C = np.zeros((self.n, self.k))\n\n for i in range(self.k):\n for j in range(self.u):\n self.O[j, i] = (\n (self.Nku[i, j] + self.alpha)\n / (self.Nu[j] + self.alpha * self.k))\n for j in range(self.v):\n self.A[j, i] = (\n (self.Nkv[i, j] + self.beta)\n / (self.Nk[i] + self.v * self.beta))\n for j in range(self.n):\n self.C[j, i] = (\n (self.Nkn[i, j] + self.gamma)\n / (self.Nk[i] + self.n * self.gamma))\n\n # print(self.O.sum(axis=1))\n # print(self.A.sum(axis=0))\n # print(self.C.sum(axis=0))\n return self.O, self.A, self.C",
"def _C(self):\n\n # Find the local x and y coordinates at each node\n xi = 0\n yi = 0\n xj = self.width()\n yj = 0\n xm = xj\n ym = self.height()\n xn = 0\n yn = ym\n\n # Calculate the [C] coefficient matrix\n C = array([[1, xi, yi, xi**2, xi*yi, yi**2, xi**3, xi**2*yi, xi*yi**2, yi**3, xi**3*yi, xi*yi**3],\n [0, 0, 1, 0, xi, 2*yi, 0, xi**2, 2*xi*yi, 3*yi**2, xi**3, 3*xi*yi**2],\n [0, -1, 0, -2*xi, -yi, 0, -3*xi**2, -2*xi*yi, -yi**2, 0, -3*xi**2*yi, -yi**3],\n \n [1, xj, yj, xj**2, xj*yj, yj**2, xj**3, xj**2*yj, xj*yj**2, yj**3, xj**3*yj, xj*yj**3],\n [0, 0, 1, 0, xj, 2*yj, 0, xj**2, 2*xj*yj, 3*yj**2, xj**3, 3*xj*yj**2],\n [0, -1, 0, -2*xj, -yj, 0, -3*xj**2, -2*xj*yj, -yj**2, 0, -3*xj**2*yj, -yj**3],\n\n [1, xm, ym, xm**2, xm*ym, ym**2, xm**3, xm**2*ym, xm*ym**2, ym**3, xm**3*ym, xm*ym**3],\n [0, 0, 1, 0, xm, 2*ym, 0, xm**2, 2*xm*ym, 3*ym**2, xm**3, 3*xm*ym**2],\n [0, -1, 0, -2*xm, -ym, 0, -3*xm**2, -2*xm*ym, -ym**2, 0, -3*xm**2*ym, -ym**3],\n\n [1, xn, yn, xn**2, xn*yn, yn**2, xn**3, xn**2*yn, xn*yn**2, yn**3, xn**3*yn, xn*yn**3],\n [0, 0, 1, 0, xn, 2*yn, 0, xn**2, 2*xn*yn, 3*yn**2, xn**3, 3*xn*yn**2],\n [0, -1, 0, -2*xn, -yn, 0, -3*xn**2, -2*xn*yn, -yn**2, 0, -3*xn**2*yn, -yn**3]])\n \n # Return the coefficient matrix\n return C",
"def mulc(self, c):\n new = []\n for i in range(len(self.values)):\n new.append(self.values[i] * c)\n return Vec3(new[0], new[1], new[2])",
"def coriolis_matrix(self):\n dot1 = squeeze(asarray(dot(self.M[0:3, 0:3], self.v[0:3]) + dot(self.M[0:3, 3:6], self.v[3:6])))\n dot2 = squeeze(asarray(dot(self.M[3:6, 0:3], self.v[0:3]) + dot(self.M[3:6, 3:6], self.v[3:6])))\n s1 = self.s(dot1)\n s2 = self.s(dot2)\n c = zeros((6, 6))\n c[0:3, 3:6] = -s1\n c[3:6, 0:3] = -s1\n c[3:6, 3:6] = -s2\n return c",
"def CalcCoriolisMatrix(self):\n q = self.plant.GetPositions(self.context)\n v = self.plant.GetVelocities(self.context)\n\n def Cv_fcn(v):\n self.plant_autodiff.SetPositions(self.context_autodiff, q)\n self.plant_autodiff.SetVelocities(self.context_autodiff, v)\n return self.plant_autodiff.CalcBiasTerm(self.context_autodiff)\n\n C = 0.5*jacobian(Cv_fcn,v)\n return C"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
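As a consistency sketch for the FFT-based product in the document above, the snippet below compares it against the dense matrix built by factor_circulant_matrix from the previous row. Both definitions are restated with explicit imports (the rows themselves rely on unqualified `fft`, `ifft`, `real`, `circulant`, `tri`, presumably star-imports); the test vectors and k are hypothetical.

```python
import numpy as np
from numpy.fft import fft, ifft
from scipy.linalg import circulant

def factor_circulant_matrix(x, k):
    n = len(x)
    return circulant(x) * (np.tri(n, n, 0) + k * np.tri(n, n, -1).T)

def factor_circulant_multiplication(u, x, k=1):
    n = len(u)
    D_k = (k ** (1 / n)) ** np.arange(0, n)   # diagonal scaling that turns C_k into an ordinary circulant
    Lambda = fft(D_k * x)                     # eigenvalues of that circulant
    return (1 / D_k) * np.real(ifft(Lambda * fft(D_k * u)))

rng = np.random.default_rng(0)
x, u, k = rng.standard_normal(8), rng.standard_normal(8), 3.0
y_dense = factor_circulant_matrix(x, k) @ u        # O(n^2) reference product
y_fft = factor_circulant_multiplication(u, x, k)   # O(n log n) FFT-based product
print(np.allclose(y_dense, y_fft))                 # expected: True
```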
Solves Tx = b using the Levinson algorithm, where T is a positive-definite symmetric Toeplitz matrix and b is a real vector. | def levinson(r, b):
n = len(b)
y = zeros((n,))
x = zeros((n,))
# normalize the system so that the T matrix has diagonal of ones
r_0 = r/r[0]
b_0 = b/r[0]
if n == 1:
return b_0
y[0] = -r_0[1]
x[0] = b_0[0]
beta = 1
alpha = -r_0[1]
for k in range(0,n-1):
beta = (1 - alpha*alpha)*beta
mu = (b_0[k+1] - dot(r_0[1:k+2], x[k::-1])) /beta
x[0:k+1] = x[0:k+1] + mu*y[k::-1]
x[k+1] = mu
if k < n-2:
alpha = -(r_0[k+2] + dot(r_0[1:k+2], y[k::-1]))/beta
y[0:k+1] = y[0:k+1] + alpha * y[k::-1]
y[k+1] = alpha
return x | [
"def Backward_Euler_solver(func, mx, mt, L, T, kappa, u_0, u_T, bCond):\n x,_ = xt_points(mx, mt, L, T)\n u_j = U(func, x, L)\n u_jp1 = np.zeros(len(u_j))\n A_BE = tridiag_A(mx, mt, L, T, kappa)\n\n # Solve the PDE: loop over all time points\n for n in range(1, mt+1):\n # Backward Euler scheme in matrix form at inner mesh points\n #u_jp1 = np.linalg.solve(A_BE, u_j)\n u_jp1[1:-1] = ThomasSolver(A_BE, u_j[1:-1])\n # Boundary conditions\n u_jp1[0] = u_0; u_jp1[-1] = u_T\n # Update u_j\n u_j = u_jp1\n\n return x, u_j",
"def SelfDualNewtonSystem(A, b, c, e):\n \n n = A.shape[1]\n m = A.shape[0]\n \n b_bar = b - np.matmul(A,e)\n c_bar = c - e\n alpha = 1 + np.dot(c, e)\n beta = n + 2\n \n A_star = np.c_[A,-b,b_bar]\n C = np.zeros((n+2,n+2))\n C[0:n,n] = c\n C[n,0:n] = -C[0:n,n].T\n C[0:n,n+1] = -c_bar\n C[n+1,0:n] = -C[0:n,n+1].T\n C[n,n+1] = alpha\n C[n+1,n] = -C[n,n+1].T\n \n yA = np.r_[np.zeros((m,m)), -A_star.T, np.zeros((n+2, m))]\n xA = np.r_[A_star, C, np.eye(n+2)]\n sA = np.r_[np.zeros((m, n+2)), -np.eye(n+2), np.eye(n+2)]\n \n return np.c_[yA, xA, sA]",
"def stbinv(A, B, C, D, y, t):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[\n 1\n ] # the number of samples is the number of columns of the data matrix y\n\n # calculate system's dimensions: number of states and number of inputs\n m = B.shape[1] # number of inputs\n n = A.shape[0] # number of states\n\n # initialize the variable v (additional input)\n v = np.zeros((n, N)) # it will be important later\n\n # initializing the flag variable\n flag = 0\n # initializing the flag variable for the vrft method\n flag_vr = 0\n # initializing the counter of reduction steps done by the algorithm\n kround = 0\n\n # starting the loop of the reduction procedure\n while flag == 0:\n # run a step of the reduction order algorithm\n Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)\n # increase the counter of reductions\n kround = kround + 1\n\n # preallocating the state vector of the inverse system\n xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples\n # preallocating the calculated input\n uhat = np.zeros((m, N - kround))\n\n # defining the reduced time vector\n tt = t[:, 0 : N - kround]\n\n # test the conditions of invertibility\n if phat < m:\n # if this condition is true, then the algorithm has failed and it is not possible to find the inverse\n flag = 1\n flag_vr = 1\n # if this is the case, we print a message and end the execution\n # print('The inversion algorithm has failed')\n return uhat, tt, flag_vr\n else:\n if rhat == m:\n # ((rhat==m)&(rhat==phat)):\n # if this condition is true, then the algorithm is done. We can calculate the signal u\n flag = 2\n # calculating the inverse of the feedforward matrix\n # E=np.linalg.inv(Dhat)\n E = np.linalg.pinv(Dhat)\n else:\n # if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm\n A = Ahat\n B = Bhat\n C = Chat\n D = Dhat\n y = yhat\n v = vhat\n # after the reduction procedure is done, then the system can be inverted\n\n # calculating the dynamic matrix of the inverse system\n Ainv = Ahat - Bhat @ E @ Chat\n # eigenvalues of the inverse system's dynamic matrix\n wv, v = np.linalg.eig(Ainv) # w=eigenvalues, v=eigenvectors\n # calculating the input matrix of the inverse system\n Binv = Bhat @ E\n # calculating the output matrix of the inverse system\n Cinv = -E @ Chat\n # calculating the feedforward matrix of the inverse system\n Dinv = E\n\n # test if the inverse dynamic system is stable\n wbool = wv > 1\n wsum = np.sum(wbool)\n # test if wsum is greater than 1\n if wsum > 0:\n # if wsum is greater than 1, then, the inverse system is unstable, so we end the execution of the algorithm\n # print('The inverse system is unstable')\n flag_vr = 2\n return uhat, tt, flag_vr\n else:\n # if wsum=0, then the inverse system is stable, and we can calculate the input signal\n # calculate the first value for the output (t=0)\n uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]\n # calculate the states and the output of the inverse system\n for k in range(0, N - 1 - kround):\n xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]\n uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]\n\n return uhat, tt, flag_vr",
"def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)",
"def solve(matrix, b):\n lu_matrix = decompose_to_LU(matrix)\n # get supporting vector y\n y = np.matrix(np.zeros([lu_matrix.shape[0], 1]), dtype=np.float64)\n for i in range(y.shape[0]):\n y[i, 0] = b[i] - lu_matrix[i, :i] * y[:i]\n\n # get vector of answers x\n x = np.matrix(np.zeros([lu_matrix.shape[0], 1]))\n for i in range(1, x.shape[0] + 1):\n x[-i, 0] = (y[-i] - lu_matrix[-i, -i:] * x[-i:, 0]) / lu_matrix[-i, -i]\n\n return np.array(x.transpose()[0], dtype=np.float64)[0]",
"def triangular_solve(rowlist, b):\n x = zero_vec(rowlist[0].D)\n for i in reversed(range(len(rowlist))):\n x[i] = (b[i] - rowlist[i] * x)/rowlist[i][i]\n return x",
"def forward_committor_sensitivity(T, A, B, index):\n\n n = len(T)\n set_X = numpy.arange(n) # set(range(n))\n set_A = numpy.unique(A) # set(A)\n set_B = numpy.unique(B) # set(B)\n set_AB = numpy.union1d(set_A, set_B) # set_A | set_B\n notAB = numpy.setdiff1d(set_X, set_AB, True) # list(set_X - set_AB)\n m = len(notAB)\n\n K = T - numpy.diag(numpy.ones(n))\n\n U = K[numpy.ix_(notAB.tolist(), notAB.tolist())]\n\n v = numpy.zeros(m)\n\n # for i in xrange(0, m):\n # for k in xrange(0, len(set_B)):\n # v[i] = v[i] - K[notAB[i], B[k]]\n v[:] = v[:] - K[notAB[:], B[:]]\n\n qI = numpy.linalg.solve(U, v)\n\n q_forward = numpy.zeros(n)\n #q_forward[set_A] = 0 # double assignment.\n q_forward[set_B] = 1\n #for i in range(len(notAB)):\n q_forward[notAB[:]] = qI[:]\n\n target = numpy.eye(1, n, index)\n target = target[0, notAB]\n\n UinvVec = numpy.linalg.solve(U.T, target)\n Siab = numpy.zeros((n, n))\n\n for i in range(m):\n Siab[notAB[i]] = - UinvVec[i] * q_forward\n\n return Siab",
"def ridge_regression(y, tx, lambda_):\n N = tx.shape[0]\n a = tx.T.dot(tx) + 2 * N * lambda_ * np.identity(tx.shape[1])\n b = tx.T.dot(y)\n w = np.linalg.solve(a, b)\n loss = compute_loss_LS(y, tx, w) \n return loss, w",
"def SOR_Solve_Opt(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n omega = 1\n l = 5\n p = 2\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n x_new[row] -= A[row,column]*x_new[column]\n x_new[row] /= A[row,row]\n x_new[row] = (1.0-omega) * x[row] + omega*x_new[row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n #record change after iteration k\n if (l==iteration):\n dxl = np.linalg.norm(x_new-x)\n if (l + p == iteration):\n dxlp = np.linalg.norm(x_new-x)\n omega = 2.0/(1.0+np.sqrt(1-(dxlp/dxl)**(1.0/p)))\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new",
"def _solveX(L, U, b):\n m, n = L.shape\n # Forward Substitution\n y = list()\n y.insert(0, b[0]/L[0][0])\n for i in range(1, m):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*y[k]\n y.insert(i, (b[i]-summ)/(L[i][i]))\n\n # Backwards Substitution\n x = [0]*m\n x[m-1] = y[m-1] / U[m-1][m-1]\n for i in range(m - 2, -1, -1):\n summ = 0\n for k in range(i+1, n):\n summ += U[i][k]*x[k]\n x[i] = (y[i] - summ)/U[i][i]\n\n return x",
"def solve_lower_triangular(amat, b):\n return solve_triangular_base(amat, b, lower=True)",
"def project_L1_ball(x: \"fasta.linalg.Vector\", t: float) -> \"fasta.linalg.Vector\":\n # By Moreau's identity, we convert to proximal of dual problem (L-inf norm)\n return x - project_Linf_ball(x, t)",
"def solve_L(L, b):\n\n raise NotImplementedError",
"def linear_problem(A, b, eps=0.0001):\n\n M = A + eps * speye_like(A)\n return torch.solve(b if b.ndimension() >= 2 else torch.unsqueeze(b, -1),\n M if isdense(M) else M.to_dense())[0].squeeze()",
"def lu_solve(A, b):\n return A.from_ddm(A.to_ddm().lu_solve(b.to_ddm()))",
"def find_argmin_T(p_s, p_t, A_d,\n A, b):\n def f_error(x):\n A_tmp = np.reshape(x[0:9], newshape=(3,3))\n b_tmp = x[9:12]\n return(find_error(p_s, p_t, A_d,\n A_tmp, b_tmp))\n def flatten(A, b):\n # Flatten out A and b into x_0\n return(np.concatenate((np.reshape(A, newshape=(9,)), b)))\n x_0 = flatten(A, b)\n #sol = optimize.root(f_error, x_0, method='lm')\n print(\"minimizing the function now!!!\")\n sol = optimize.minimize(f_error, x_0)\n def expand(x):\n # Un-flattens x into the tuple of A and b\n return(np.reshape(x[0:9], newshape=(3,3)), x[9:12])\n\n A_tmp, b = expand(sol.x)\n print(\"==============\")\n print(\"A_tmp, before we make it near orthogonal\")\n print(A_tmp)\n print(\"its determinant\")\n print(np.linalg.det(A_tmp))\n print(\"==============\")\n #print(\"\")\n A = near_orthog(A_tmp)\n return(A, b)",
"def compute_twist(rbt):\n #YOUR CODE HERE\n R = rbt[:3,:3]\n orientation = eqf.find_omega_theta(R)# omega/theta\n v = eqf.find_v(orientation[0], orientation[1], trans).reshape(3,)\n return (v, orientation[0])",
"def RHS(y,t):\r\n\r\n return np.multiply(A.dot(y),ones-y)-beta*y",
"def eqp_kktfact(H, c, A, b):\n n, = np.shape(c) # Number of parameters\n m, = np.shape(b) # Number of constraints\n\n # Karush-Kuhn-Tucker matrix of coefficients.\n # Defined as in Nocedal/Wright \"Numerical\n # Optimization\" p.452 in Eq. (16.4).\n kkt_matrix = csc_matrix(bmat([[H, A.T], [A, None]]))\n # Vector of coefficients.\n kkt_vec = np.hstack([-c, -b])\n\n # TODO: Use a symmetric indefinite factorization\n # to solve the system twice as fast (because\n # of the symmetry).\n lu = linalg.splu(kkt_matrix)\n kkt_sol = lu.solve(kkt_vec)\n x = kkt_sol[:n]\n lagrange_multipliers = -kkt_sol[n:n+m]\n\n return x, lagrange_multipliers"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
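A usage sketch for the levinson document above, assuming that definition is in scope together with `from numpy import zeros, dot` (the names it uses unqualified). The Toeplitz system below is hypothetical; its first column is diagonally dominant so the matrix is positive-definite, and the dense solve is only a reference check.

```python
import numpy as np
from scipy.linalg import toeplitz
# assumes: from numpy import zeros, dot  plus the levinson() definition from the row above

r = np.array([4.0, 1.0, 0.5, 0.25])   # first column of the symmetric Toeplitz matrix T
b = np.array([1.0, 2.0, 3.0, 4.0])    # right-hand side

x = levinson(r, b)                        # Levinson recursion, O(n^2)
x_ref = np.linalg.solve(toeplitz(r), b)   # dense reference solve, O(n^3)
print(np.allclose(x, x_ref))              # expected: True
```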
Compute the log determinant of a positive-definite symmetric Toeplitz matrix. The determinant is computed recursively. The intermediate solutions of the Levinson recursion are exploited. | def toeplitz_slogdet(r):
n = len(r)
r_0 = r[0]
r = np.concatenate((r, np.array([r_0])))
r /= r_0 # normalize the system so that the T matrix has diagonal of ones
logdet = n*np.log(np.abs(r_0))
sign = np.sign(r_0)**n
if n == 1:
return (sign, logdet)
# now on is a modification of Levinson algorithm
y = zeros((n,))
x = zeros((n,))
b = -r[1:n+1]
r = r[:n]
y[0] = -r[1]
x[0] = b[0]
beta = 1
alpha = -r[1]
d = 1 + dot(-b[0], x[0])
sign *= np.sign(d)
logdet += np.log(np.abs(d))
for k in range(0,n-2):
beta = (1 - alpha*alpha)*beta
mu = (b[k+1] - dot(r[1:k+2], x[k::-1])) /beta
x[0:k+1] = x[0:k+1] + mu*y[k::-1]
x[k+1] = mu
d = 1 + dot(-b[0:k+2], x[0:k+2])
sign *= np.sign(d)
logdet += np.log(np.abs(d))
if k < n-2:
alpha = -(r[k+2] + dot(r[1:k+2], y[k::-1]))/beta
y[0:k+1] = y[0:k+1] + alpha * y[k::-1]
y[k+1] = alpha
return(sign, logdet) | [
"def fast_logdet(matrix):\n sign, ld = np.linalg.slogdet(matrix)\n if not sign > 0:\n return -np.inf\n return ld",
"def log_abs_det_jacobian(self, z):\n pre_u = self.u_ + self.u\n pre_w = self.w_ + self.w\n a = F.softplus(self.a + self.inv)\n w = F.softmax(pre_w, dim=3)\n u = F.softmax(pre_u, dim=3)\n # Perform computation\n pre_sigm = torch.sum(u * a * z, 3) + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = torch.sum(w * sigm, dim=3)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n logj = F.log_softmax(pre_w, dim=3) + logsigmoid(pre_sigm) + logsigmoid(-pre_sigm) + torch.log(a)\n # n, d, d2, dh\n logj = logj + F.log_softmax(pre_u, dim=3)\n # n, d, d2, dh, d1\n logj = torch.log(torch.sum(torch.exp(logj),3))\n # n, d, d2, d1\n logdet_ = logj + np.log(1 - self.eps) - (torch.log(x_pre_clipped) + torch.log(-x_pre_clipped + 1))\n return logdet_",
"def log_det(chol):\n return 2. * tf.reduce_sum(tf.log(tf.matrix_diag_part(chol)), [-1])",
"def log_abs_det_jacobian(self, x, y):\n return torch.ones(x.size()[:-1], dtype=x.dtype, layout=x.layout, device=x.device) * \\\n self.scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1).sum(-1)",
"def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):\n if covariance_type == 'full':\n n_components, _, _ = matrix_chol.shape\n log_det_chol = (np.sum(np.log(\n matrix_chol.reshape(\n n_components, -1)[:, ::n_features + 1]), 1))\n\n elif covariance_type == 'tied':\n log_det_chol = (np.sum(np.log(np.diag(matrix_chol))))\n\n elif covariance_type == 'diag':\n log_det_chol = (np.sum(np.log(matrix_chol), axis=1))\n\n else:\n log_det_chol = n_features * (np.log(matrix_chol))\n\n return log_det_chol",
"def _inverse_log_det_jacobian(self, x):\n alpha, beta = self._get_alpha_beta()\n diff = x - self.x0\n r = tf.linalg.norm(diff, axis=-1, keepdims=True)\n h = 1. / (alpha + r)\n h_prime = -(h ** 2)\n beta_h = beta * h\n log_det_jacobian = tf.reduce_sum(\n (self.dim - 1) * tf.math.log1p(beta_h)\n + tf.math.log1p(beta_h + beta * h_prime * r), axis=-1)\n return log_det_jacobian",
"def lemma_logdet(B, M, yivars, L):\n \n S = np.dot(M.T * yivars, M) * L\n d = np.einsum('ii->i', S)\n d += 1\n \n s, logdetS = np.linalg.slogdet(S)\n assert s==1\n \n logdetC_fast = np.sum(np.log(yivars**-1))\n #s, logdetC = np.linalg.slogdet(np.diag(yivars**-1))\n #assert s==1\n #print(logdetC, logdetC_fast, np.allclose(logdetC, logdetC_fast))\n \n logdetB = logdetS + logdetC_fast\n \n return logdetB",
"def _gradLogDet(self, hyperparams,dK,columns =False ):\n KV = self.get_covariances(hyperparams)\n Si = KV['Si']\n if columns:\n d=(KV['Uc']*SP.dot(dK,KV['Uc'])).sum(0)\n RV = SP.dot(KV['Sr'],SP.dot(Si,d))\n if VERBOSE:\n #kd = SP.kron(KV['Sr'],d)\n kd = krondiag(KV['Sr'],d)\n #kd = krondiag_(KV['Sr'],d)\n RV_=SP.sum(kd*Si)\n check_dist(RV,RV_)\n else:\n #d=SP.dot(KV['Ur'].T,SP.dot(dK,KV['Ur'])).diagonal()\n d=(KV['Ur']*SP.dot(dK,KV['Ur'])).sum(0)\n RV = SP.dot(d,SP.dot(Si,KV['Sc']))\n if VERBOSE:\n #kd = SP.kron(d,KV['Sc'])\n kd=krondiag(d,KV['Sc'])\n #kd=krondiag_(d,KV['Sc'])\n RV_=SP.sum(kd*Si)\n check_dist(RV,RV_)\n return RV",
"def plogdet(K):\n egvals = eigvalsh(K)\n return npsum(log(egvals[egvals > epsilon]))",
"def determinant(A):\r\n # This is Crout's Algorithm.\r\n # U will remain zero in the lower left entries, and will be 1's along the diagonal.\r\n # L will remain zero in the upper right entries.\r\n # A = U L\r\n n = len(A)\r\n L = zeros(n) # Initialize with zeros Numerators for the lower triangular matrix\r\n U = zeros(n) # Initialize with zeros Numerators for the upper triangular matrix\r\n DL = ones(n) # Initialize with zeros Denominators for the lower triangular matrix\r\n DU = ones(n) # Initialize with zeros Denominators for the upper triangular matrix\r\n # L = [[0] * n] * n # Does not work because it initializes the matrix with references to the same lists\r\n # U = [[0] * n] * n # Does not work because it initializes the matrix with references to the same lists\r\n for j in range(0, n):\r\n assert len(A[j]) == n\r\n U[j][j] = 1 # set the diagonal entries of U to 1\r\n for i in range(j, n): # starting at L[j][j], solve j-th column of L\r\n tempL = A[i][j]\r\n tempDL = 1 # Temporary denominator for the lower triangular matrix\r\n for k in range(0, j):\r\n assert DL[i][k] != 0\r\n assert DU[k][j] != 0\r\n tempL = tempL * DL[i][k] * DU[k][j] - tempDL * L[i][k] * U[k][j]\r\n tempDL = tempDL * DL[i][k] * DU[k][j]\r\n L[i][j] = tempL\r\n DL[i][j] = tempDL\r\n for i in range(j + 1, n):# starting at U[j][j+1], solve j-th row of U\r\n tempU = A[j][i]\r\n tempDU = 1 # Temporary denominator for the upper triangular matrix\r\n for k in range(0, j):\r\n assert DU[k][i] != 0\r\n assert DL[j][k] != 0\r\n tempU = tempU * DU[k][i] * DL[j][k] - tempDU * L[j][k] * U[k][i]\r\n tempDU = tempDU * DU[k][i] * DL[j][k]\r\n U[j][i] = tempU * DL[j][j]\r\n if L[j][j] == 0:\r\n assert simplistic_determinant(A) == 0\r\n return 0 # The determinant is zero, so avoid dividing by zero by short circuiting the computation.\r\n DU[j][i] = tempDU * L[j][j]\r\n\r\n # Now calculate the determinant by multiplying the diagonal entries of the lower-left triangular matrix\r\n num = 1\r\n den = 1\r\n for i in range(0, n):\r\n assert U[i][i] == 1\r\n for j in range(0, i):\r\n assert U[i][j] == 0\r\n for j in range(i + 1, 3):\r\n assert L[i][j] == 0\r\n num *= L[i][i]\r\n den *= DL[i][i]\r\n # Now divide the denominator den from the numerator.\r\n # The numerator should evenly divide (assuming the input matrix A only had ints),\r\n # Was having trouble with den equaling 0. Fixed it by returning zero if any L[j][j] was 0. See above.\r\n assert den != 0\r\n det1 = num // den\r\n det2 = simplistic_determinant(A)\r\n if det1 != det2:\r\n print(\"Mismatch! \", det1, det2)\r\n return det2",
"def determinant(self):\n if self.n_rows != self.n_cols:\n raise Exception('Matrix is not square')\n if self.n_rows == 2:\n return (self.data[0][0] * self.data[1][1]) - (self.data[1][0] * self.data[0][1])\n else:\n echelon, ops = reduce_to_echelon(self.data.copy(), True)\n swaps = sum([1 if row[0] == 'swap' else 0 for row in ops])\n return math.prod([echelon[i][i] for i in range(len(echelon))]) * (-1) ** swaps",
"def determinant(self):\n r, s = self.signature_pair_of_matrix()\n return (-1)**s*prod([ G.determinant() for G in self._local_symbols ])",
"def determinant(A):\n \n total = 0\n\n if len(A) == 1:\n return A[0][0]\n\n for col in range(len(A)):\n Asub = A[1:]\n for j in range(len(A)-1):\n Asub[j] = Asub[j][:col] + Asub[j][col+1:]\n subdet = determinant(Asub)\n sign = (-1) ** (col % 2)\n total += sign * A[0][col] * subdet\n return total",
"def determinant(A):\n \n total = 0\n\n if len(A) == 1:\n return A[0][0]\n\n for col in range(len(A)):\n Asub = A[1:]\n for j in range(len(A)-1):\n Asub[j] = Asub[j][:col] + Asub[j][col+1:]\n subdet = determinant(Asub)\n sign = (-1) ** (col % 2)\n total += sign * A[0][col] * subdet\n\n return total",
"def det(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = A[:]\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0:\n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1, n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, one element at a time.\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n\n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... product of diagonals is determinant\n\n return product",
"def task5_determinant(matrix):\n return np.linalg.det(matrix)",
"def determinant(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(NotImplementedError, \"Calculating determinant not implemented for matrices largerer than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n return self.g[0][0] # a 1x1 matrix\n else:\n return ((self.g[0][0] * self.g[1][1]) - (self.g[0][1] * self.g[1][0])) # a 2x2 matrix\n # TODO - your code here",
"def logit_deriv(y):\n# if y.any() < 0.0 or y.any() > 1.0:\n# raise Exception\n\n return y*(1-y)",
"def determinant(self):\n if not self.is_square():\n raise ValueError('Matrix must be square')\n tmp_matrix = list(map(list, deepcopy(self.data))) # change tuples in data to list, for make them mutable\n # bring the tmp_matrix into a triangular form\n for index in range(self._width):\n for i in range(index + 1, self._width):\n if tmp_matrix[index][index] == 0: # if diagonal element equal zero, change it to approximately zero\n tmp_matrix[index][index] = 1.0e-14 # in other case will be ZeroDivisionError\n scalar = tmp_matrix[i][index] / tmp_matrix[index][index]\n for j in range(self._height):\n tmp_matrix[i][j] = tmp_matrix[i][j] - scalar * tmp_matrix[index][j]\n # the determinant equal to the product of diagonal elements of the triangular matrix\n result = 1\n for index in range(self._width):\n result *= tmp_matrix[index][index]\n # need to approximate the result to avoid the tails of the product of floating point numbers\n return round(result, 10)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
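A sketch comparing the recursive log-determinant in the document above with numpy's dense slogdet, again assuming toeplitz_slogdet and the numpy names it uses unqualified (`zeros`, `dot`) are in scope. The first column r is a hypothetical positive-definite example.

```python
import numpy as np
from scipy.linalg import toeplitz
# assumes: from numpy import zeros, dot  plus the toeplitz_slogdet() definition from the row above

r = np.array([4.0, 1.0, 0.5, 0.25])   # first column of a positive-definite symmetric Toeplitz matrix

sign, logdet = toeplitz_slogdet(r)                       # Levinson-based recursion
sign_ref, logdet_ref = np.linalg.slogdet(toeplitz(r))    # dense reference
print(sign == sign_ref, np.isclose(logdet, logdet_ref))  # expected: True True
```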
Preprocessing needed for toeplitz_inverse_multiplication() | def toeplitz_inverse_multiplication_prep(T_column):
phi=1
psi=2
assert phi != 0
assert psi != 0
assert phi != psi
n = len(T_column)
x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )
y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )
x_0 = x[0]
D_phi = (phi**(1/n))**np.arange(0,n)
D_psi = (psi**(1/n))**np.arange(0,n)
Lambda_1 = fft(D_psi*x)
Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))
Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))
Lambda_4 = fft(D_phi*x)
return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4) | [
"def transformPreMultiply(*args):\n return _almathswig.transformPreMultiply(*args)",
"def bd_toeplitz_inverse_multiplication(u, *arrs):\n \n y = zeros(shape(u))\n n_start = 0\n n_end = 0\n for t in arrs:\n n_start = n_end\n n_end += len(t[3]) # len(t[3]) is the length of the block\n y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)\n assert len(y) == n_end\n return y",
"def comp_inverse_un(switch,nbTermes,maxInt,precision):\n P = poly_random(switch,nbTermes,maxInt)\n P = P - P(0) + P.parent().one()\n return inverse_un_series(P,precision)",
"def de_mult(self,z):\n if isinstance(z,np.ndarray) and z.size>1:\n assert np.all(np.diff(z)>0.)\n return (z+1.)**(3.*(1.+self.w))",
"def transform(p, m):\n return (p[0] * m[0] + p[1] * m[4] + p[2] * m[8] + m[12],\n p[0] * m[1] + p[1] * m[5] + p[2] * m[9] + m[13],\n p[0] * m[2] + p[1] * m[6] + p[2] * m[10] + m[14])",
"def reconstruct(Pf, Pt, Pz):\n return np.matmul(np.transpose(Pt), Pz[:, np.newaxis] * Pf)",
"def _build_m_z_inv(self):\n\n M = self.M_z.copy()\n\n self.M_z_inv = lin.inv(M).tocsc()",
"def revert_output_preprocessing(self, output):\n return np.exp(output)",
"def test_preprocessing_unrotate(self):\n pass",
"def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod",
"def test__inverse_transform_continuous(self):",
"def complex_inverse(c1,cr):",
"def inverse_fisher_z_transform(z):\r\n return ((e ** (2 * z)) - 1.) / ((e ** (2 * z)) + 1.)",
"def find_mult_inverse(self, p):\n for i in self.table:\n prod = self.mult(p,i)\n while prod and prod[-1] == 0:\n prod.pop()\n if prod == [1]:\n return self.simplified(i)\n break\n return None",
"def sbf_inverse_uniprocessor(x):\n return x",
"def inf_zero_one_to_triple(p,q,r):\n ### infinity = [1,0], zero = [0,1], one = [1,1] in CP^1\n p1,p2=p\n q1,q2=q\n r1,r2=r\n M = [[p1,q1],[p2,q2]]\n Minv = matrix2_inv(M)\n [mu,lam] = matrix_mult_vector(matrix2_inv([[p1,q1],[p2,q2]]), [r1,r2])\n return [[mu*p1, lam*q1],[mu*p2, lam*q2]]",
"def u_inverse(U, check=False, verbose=False):\n\n m, n = U.shape\n\n if verbose:\n print(\"u_inverse\")\n print(shortstr(U))\n\n #items = []\n leading = []\n for row in range(m):\n #cols = numpy.where(U[row, :])[0]\n cols = U.get_cols(row)\n #print(\"row %d, cols %s\"%(row, cols))\n if not len(cols):\n break\n col = cols[0]\n assert U[row, col]\n leading.append(col)\n\n #print(\"leading:\", leading)\n assert sorted(leading) == leading\n assert len(set(leading)) == len(leading)\n\n U1 = zeros(n, m)\n\n #print( shortstr(U))\n\n # Work backwards\n i = len(leading)-1 # <= m\n while i>=0:\n\n j = leading[i]\n #print( \"i=%d, j=%d\"%(i, j))\n r = Fraction(1, U[i, j])\n U1[j, i] = r\n\n #print( \"U, U1, U*U1:\")\n #print( shortstrx(U, U1, dot(U, U1)))\n\n k = i-1\n while k>=0:\n #print( \"dot\")\n #print( shortstr(U[k,:]))\n #print( shortstr(U1[:,i]))\n r = dot(U[k, :], U1[:, i])\n #print( \"=\", r)\n if r != 0:\n j = leading[k]\n s = U[k, j]\n #print( \"set\", j, i)\n U1[j, i] = -Fraction(r, s)\n #print( shortstr(U1[:,i]))\n assert dot(U[k, :], U1[:, i]) == 0\n k -= 1\n i -= 1\n\n return U1",
"def preprocess_input(input_pixels):\n\n input_pixels[:, 1] /= 31.0\n input_pixels[:, 2] += 1.0\n input_pixels[:, 2] /= 2.2\n return input_pixels",
"def mul_inplace(a, b):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Matrix multiplication with the inverse of a block-diagonal matrix having Toeplitz blocks: y = T u. Analogous to toeplitz_inverse_multiplication(). | def bd_toeplitz_inverse_multiplication(u, *arrs):
y = zeros(shape(u))
n_start = 0
n_end = 0
for t in arrs:
n_start = n_end
n_end += len(t[3]) # len(t[3]) is the length of the block
y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)
assert len(y) == n_end
return y | [
"def chol_inverse_diag(t):\n (uu, nrows) = t.shape\n B = np.zeros((uu, nrows), dtype=\"float64\")\n B[1, nrows - 1] = 1.0 / t[1, nrows - 1] ** 2\n B[0, nrows - 1] = -t[0, nrows - 1] * B[1, nrows - 1] / t[1, nrows - 2]\n for j in reversed(range(nrows - 1)):\n tjj = t[1, j]\n B[1, j] = (1.0 / tjj - t[0, j + 1] * B[0, j + 1]) / tjj\n B[0, j] = -t[0, j] * B[1, j] / t[1, j - 1]\n return B",
"def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)",
"def inverse_matrice(T):\n a,b,c,d = T[0][0],T[0][1],T[1][0],T[1][1]\n det = a*d-b*c\n aa,bb,cc,dd = d/det,-b/det,-c/det,a/det\n Tinv = [[aa,bb],[cc,dd]]\n return Tinv",
"def test_BlockMatrix_2x2_inverse_numeric():\n M = Matrix([[1, 2], [3, 4]])\n # rank deficient matrices that have full rank when two of them combined\n D1 = Matrix([[1, 2], [2, 4]])\n D2 = Matrix([[1, 3], [3, 9]])\n D3 = Matrix([[1, 4], [4, 16]])\n assert D1.rank() == D2.rank() == D3.rank() == 1\n assert (D1 + D2).rank() == (D2 + D3).rank() == (D3 + D1).rank() == 2\n\n # Only A is invertible\n K = BlockMatrix([[M, D1], [D2, D3]])\n assert block_collapse(K.inv()).as_explicit() == K.as_explicit().inv()\n # Only B is invertible\n K = BlockMatrix([[D1, M], [D2, D3]])\n assert block_collapse(K.inv()).as_explicit() == K.as_explicit().inv()\n # Only C is invertible\n K = BlockMatrix([[D1, D2], [M, D3]])\n assert block_collapse(K.inv()).as_explicit() == K.as_explicit().inv()\n # Only D is invertible\n K = BlockMatrix([[D1, D2], [D3, M]])\n assert block_collapse(K.inv()).as_explicit() == K.as_explicit().inv()",
"def getInverseMatrix(self) -> CMatrix4:\n ...",
"def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi",
"def inverse(self) -> \"Matrix\":\n new_mtx = Matrix(self._nrows, self._ncols)\n # step 1. Create a matrix of minors, taking the determinants of the submatrices\n for row in range(self._nrows):\n for col in range(self._ncols):\n val = self.get_sub_matrix(row, col).get_determinant()\n # step 2. Apply the checkerboard of alternating signs to the matrix values\n val *= ((-1) ** (row + col))\n new_mtx.set(row, col, val)\n # step 3. Transpose the matrix\n new_mtx = new_mtx.transpose()\n # step 4. Multiply the matrix by (1/d), where 'd' is the original matrix's determinant\n d = self.get_determinant()\n if d == 0:\n raise MatrixHasNoInverseError()\n return new_mtx.scale(1 / d)",
"def inverse(self):\n inverse = []\n inverse_row = [] \n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n if self.h == 1:\n inverse_row . append(1/float(self.g[0][0]))\n inverse . append (inverse_row)\n return Matrix(inverse)\n \n # TODO - your code here\n if self.h == 2:\n # Intializing variable to calculate determination\n a = self.g[0][0]\n b = self.g[0][1]\n c = self.g[1][0]\n d = self.g[1][1]\n det_A = (a * d) - (b * c)\n \n # Intializing variable to implement[[d, -b], [-c, a]]\n self.g[0][0] = d\n self.g[0][1] = - b\n self.g[1][0] = - c\n self.g[1][1] = a\n \n # For loop to calculate the inverse \n for i in range(len(self.g)):\n inverse_row = []\n for j in range(len(self.g[0])):\n inverse_row . append ((1 / float(det_A)) * self.g[i][j])\n inverse . append(inverse_row) \n return Matrix(inverse)",
"def star_inverse(m):\n if (m.shape[0] != m.shape[1]):\n raise ValueError(\"m must be a square matrix! \")\n return star(np.eye(m.shape[0]) - m)",
"def inverse(self):\n ((c, ms, x),(s, c2, y), (z1, z2, o)) = self.matrix\n return Transform([[c, s, (-c*x)-(s*y)],\n [-s, c, (s*x)-(c*y)],\n [0, 0, 1]])",
"def u_inverse(U, check=False, verbose=False):\n\n m, n = U.shape\n\n if verbose:\n print(\"u_inverse\")\n print(shortstr(U))\n\n #items = []\n leading = []\n for row in range(m):\n #cols = numpy.where(U[row, :])[0]\n cols = U.get_cols(row)\n #print(\"row %d, cols %s\"%(row, cols))\n if not len(cols):\n break\n col = cols[0]\n assert U[row, col]\n leading.append(col)\n\n #print(\"leading:\", leading)\n assert sorted(leading) == leading\n assert len(set(leading)) == len(leading)\n\n U1 = zeros(n, m)\n\n #print( shortstr(U))\n\n # Work backwards\n i = len(leading)-1 # <= m\n while i>=0:\n\n j = leading[i]\n #print( \"i=%d, j=%d\"%(i, j))\n r = Fraction(1, U[i, j])\n U1[j, i] = r\n\n #print( \"U, U1, U*U1:\")\n #print( shortstrx(U, U1, dot(U, U1)))\n\n k = i-1\n while k>=0:\n #print( \"dot\")\n #print( shortstr(U[k,:]))\n #print( shortstr(U1[:,i]))\n r = dot(U[k, :], U1[:, i])\n #print( \"=\", r)\n if r != 0:\n j = leading[k]\n s = U[k, j]\n #print( \"set\", j, i)\n U1[j, i] = -Fraction(r, s)\n #print( shortstr(U1[:,i]))\n assert dot(U[k, :], U1[:, i]) == 0\n k -= 1\n i -= 1\n\n return U1",
"def right_inverse(mat):\n return mat.T @ np.linalg.inv(mat @ mat.T)",
"def inv(transform_matrix):\n\n r = transform_matrix[0:3, 0:3]\n t = transform_matrix[0:3, 3]\n t_inv = -1 * r.T.dot(t)\n transform_inv = np.eye(4)\n transform_inv[0:3, 0:3] = r.T\n transform_inv[0:3, 3] = t_inv\n\n return transform_inv",
"def inverse(self):\n\t\tif not self.is_square():\n\t\t\traise(ValueError, \"Non-square Matrix does not have an inverse.\")\n\t\tif self.row_len > 2:\n\t\t\traise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\t\n\t\t# TODO - your code here\n\t\tinverse = [] \n\t\tif self.row_len == 1: \n\t\t\tinverse.append([1 / self.g[0][0]])\n\t\telif self.row_len == 2: \n\t\t\t# If the matrix is 2x2, check that the matrix is invertible\n\t\t\tif self.g[0][0] * self.g[1][1] == self.g[0][1] * self.g[1][0]:\n\t\t\t\traise ValueError('The matrix is not invertible.')\n\t\t\telse:\n\t\t\t\tdet = self.determinant() \n\t\t\t\ta = self.g[0][0]\n\t\t\t\tb = self.g[0][1]\n\t\t\t\tc = self.g[1][0]\n\t\t\t\td = self.g[1][1]\n\t\t\t\tinverse = [[d, -b],[-c, a]]\n\n\t\t\t\tfor i in range(len(inverse)):\n\t\t\t\t\tfor j in range(len(inverse[0])):\n\t\t\t\t\t\tinverse[i][j] = det * inverse[i][j]\n\t\t\t\t\n\t\treturn Matrix(inverse)",
"def block_inverse(iA, B, C, D):\n logger = logging.getLogger(__name__)\n logger.info(\"------- block_inverse(iA, B, C, D) -------\")\n logger.debug(\"Input matrix types are: {} {} {} {}\".format(type(iA), type(B),\n type(C), type(D)))\n logger.debug(\"Input matrix shapes are: {} {} {} {}\".format(\n np.array(iA).shape,\n np.array(B).shape,\n np.array(C).shape,\n np.array(D).shape))\n n = iA.shape[0]\n result = np.zeros([n+1, n+1])\n CA = np.dot(C, iA).reshape([1,n])\n AB = np.dot(iA, B).reshape([n,1])\n det = D-CA.dot(B)\n result[:n,:n] = iA + AB.dot(CA) / det\n result[n:n+1,:n] = -CA / det\n result[:n,n:n+1] = -AB / det\n result[n,n] = 1 / det\n return result",
"def mulI( self ):\n return Mat( -self.m11, self.m12, self.m13,\n self.m21, -self.m22, self.m23,\n self.m31, self.m32, -self.m33 )",
"def inverse(self):\n if self.determinant() != 0:\n ops = reduce_to_red_echelon(self.data.copy(), True)[1]\n matrix = identity_matrix(self.n_rows).data\n \n if ops:\n if isinstance(ops[0], str):\n ops = [ops]\n \n for op in ops:\n if op[0] == 'swap':\n matrix = row_swap(matrix, op[1], op[2])\n elif op[0] == 'multiplication':\n matrix = row_multiply(matrix, op[1], op[2])\n elif op[0] == 'subtract':\n matrix = row_subtract(matrix, op[1], op[2], op[3])\n else:\n raise ValueError('Row operation not recognized')\n else:\n raise ValueError('Matrix has a determinant of 0 and is not invertible')\n return Matrix(matrix)",
"def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod",
"def inv(mat):\n return mat.inv()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse a single line of csv-to-arrow output. Raise RuntimeError if a line cannot be parsed. (We can't recover from that because we don't know what's happening.) | def _parse_csv_to_arrow_warning(line: str) -> I18nMessage:
for pattern, builder in _ERROR_PATTERNS:
match = pattern.match(line)
if match:
return builder(**match.groupdict())
raise RuntimeError("Could not parse csv-to-arrow output line: %r" % line) | [
"def __parse_csv_line(self, csv_line):\n if Case.label_column == -1:\n raise Exception(\"Cannot parse CSV file until properties of file have been specified to the Case class\")\n\n # Loop through each comma-separated item in the line, after first truncating the newline from the end\n for idx, item in enumerate(csv_line[0:len(csv_line) - 1].split(\",\")):\n if idx == Case.label_column:\n self.label = item # Save column tagged as label as a string\n else:\n try:\n self.attributes.append(float(item)) # Parse each column tagged as an attribute as a float\n except ValueError:\n logging.error(\"Cannot parse attribute \\\"%s\\\" into a floating-point number\" % item)\n raise ParseCsvError(item, csv_line)\n self.attributesAlreadyExamined.append(False)\n self.predicted = None",
"def test_parseLine2(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"11/11/19,Brighter Futures,12000\"\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then: (Using PyTruth assertions)\n AssertThat(result).IsNone()",
"def test_parseLine1(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"12Nov2019,Teacher,Brighter Futures,12000\"\n expectedResult = {\n 'date': '2019-11-12',\n 'job_title': 'Teacher',\n 'company_name': 'Brighter Futures',\n 'salary': 12000\n }\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then:\n assert result == expectedResult",
"def parse_csv(line: str) -> str: \n\n try:\n\n Q_column_headers = ['trade_dt','arrival_tm','rec_type','symbol','event_tm', \\\n 'event_seq_nb','exchange','bid_pr','bid_size','ask_pr','ask_size']\n T_column_headers = ['trade_dt','arrival_tm','rec_type','symbol','event_tm', \\\n 'event_seq_nb','exchange','trade_pr']\n \n bid_pr = None\n bid_size = None\n ask_pr = None\n ask_size = None\n trade_pr = None\n\n record = line.split(\",\")\n if record[2] == 'Q':\n record_dict = dict(zip(Q_column_headers, record))\n bid_pr = Decimal(record_dict['bid_pr'])\n bid_size = int(record_dict['bid_size'])\n ask_pr = Decimal(record_dict['ask_pr'])\n ask_size = int(record_dict['ask_size'])\n else:\n record_dict = dict(zip(T_column_headers, record))\n trade_pr = Decimal(record_dict['trade_pr'])\n\n trade_dt = datetime.datetime.strptime(record_dict['trade_dt'], '%Y-%m-%d')\n rec_type = record_dict['rec_type']\n symbol = record_dict['symbol']\n exchange = record_dict['exchange']\n event_tm = datetime.datetime.strptime(record_dict['event_tm'], '%Y-%m-%d %H:%M:%S.%f')\n event_seq_nb = int(record_dict['event_seq_nb'])\n arrival_tm = datetime.datetime.strptime(record_dict['arrival_tm'], '%Y-%m-%d %H:%M:%S.%f')\n partition = rec_type\n\n return [trade_dt, rec_type, symbol, exchange, event_tm, event_seq_nb, arrival_tm, \\\n trade_pr, bid_pr, bid_size, ask_pr, ask_size, partition]\n \n except Exception as e:\n \n # If anything goes wrong, output empty record with \"B\" partition\n # empty_str = \",\" * (COMMON_EVENT_COLUMN_COUNT - 1)\n # return f\"{empty_str}B\".split(\",\")\n\n return [ None for i in range(COMMON_EVENT_COLUMN_COUNT - 1) ] + ['B']",
"def parse(cls, line):\r\n raise NotImplementedError",
"def from_csv_line(line):\r\n return line.strip().split(',')",
"def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert",
"def processLine(line):\n assert line[-1] == '\\n'\n line = line[:-1]\n (ticker, date, open_, high, low, close, volume, dividends, splits,\n adj_open, adj_high, adj_low, adj_close, adj_volume) = line.split(',')\n # Check date.\n try:\n tmp_date = datetime.datetime.strptime(date, '%Y-%m-%d')\n except ValueError:\n assert False, 'unsupported date format in line: %s' % line\n assert checkOHLC(open_, high, low, close), 'invalid OHLC in line: %s' % line\n assert checkOHLC(adj_open, adj_high, adj_low, adj_close), (\n 'invalid adj OHLC in line: %s' % line)\n assert checkVolume(volume), 'invalid volume in line: %s' % line\n if adj_volume != '':\n adj_volume = getNonNegFloat(adj_volume)\n assert adj_volume is not None, 'invalid adj volume in line: %s' % line\n if dividends != '':\n dividends = getNonNegFloat(dividends)\n assert dividends is not None, 'invalid dividends in line: %s' % line\n if splits != '':\n splits = getNonNegFloat(splits)\n assert splits is not None, 'invalid splits in line: %s' % line\n\n return ticker, date, line",
"def line_to_row(line):\n m = line_re.match(line)\n if m:\n return Row(hostname=m.group(1), transferred=m.group(2))\n else:\n return None",
"def line_parser(one_line):\n one_line_splitted = one_line.split('\\t')\n\n if not len(one_line_splitted) == 3:\n raise Exception('Not enough columns in data file')\n else:\n res = {\n \"epoch\": one_line_splitted[0],\n \"seconds\": one_line_splitted[1],\n \"description\": one_line_splitted[2].replace('#','').replace('\\n','').strip()\n }\n\n return res",
"def parse(self, line):\n try:\n (year, month, day, hour, minute, second, microseconds, offset_hour, offset_minute, source, process, logentry) = re.match('^(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)T(\\d\\d):(\\d\\d):(\\d\\d)\\.([\\d]+)\\+(\\d\\d):(\\d\\d) ([a-z]+)\\[([a-zA-Z0-9_.]+)\\]: ([0-9a-z-A-Z\\-_\\.\\[\\]:\\?\\#\\\",/\\ ={}\\'\\(\\)<>]+)$', line).groups()\n except:\n pass\n \n try:\n parsed_data = dict()\n parsed_data['timestamp'] = \" \".join([\"-\".join([year, month, day]), \":\".join([hour, minute, second])])\n parsed_data['log_time'] = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n parsed_data['log_source'] = source\n parsed_data['log_type'] = process\n except (AttributeError, UnboundLocalError):\n PARSE_ERRORS.append(line)\n return False\n\n #TODO: This still needs work on spaces in values surrounded by \" \" \n if parsed_data['log_source'] == \"heroku\":\n if logentry.__len__() > 1:\n logentry = re.sub(', ', ',', logentry)\n line_chunks = re.split(' ', logentry)\n for chunk in line_chunks:\n line_chunks = re.split('=', chunk)\n if line_chunks.__len__() > 2:\n #fwd and path are a little clunky to parse\n pass\n elif line_chunks.__len__() > 1:\n parsed_data[line_chunks[0]] = line_chunks[1]\n else:\n pass\n else:\n return False\n else:\n # TODO: [app] \n # Needs parsing. Do that here.\n return False\n\n return parsed_data",
"def from_cvs_to_obj(csv_line):\n arg_arr = csv_line.split(\",\")\n if len(arg_arr) != 8:\n print(\"Error : Few data are missing\", file=stderr)\n exit(-1)\n return Flight(arg_arr[0], arg_arr[1], arg_arr[2], arg_arr[3], arg_arr[4], arg_arr[5], arg_arr[6], arg_arr[7])",
"def parse_line(line):\n pattern = re.compile(REGEX)\n result = pattern.match(line, re.I)\n if result != None:\n part = result.groups()\n return LogEntry(part[0], part[3], part[4], part[5], part[6])\n return None",
"def parse_line(self, line):\n \n if self._multiline_key:\n self.while_multi_line(line)\n \n elif self.EXTENDS_LINE.match(line):\n self.extends_line_match(self.EXTENDS_LINE.match(line), line)\n \n elif self.FROM_FILE_LINE.match(line):\n self.from_file_line_match(self.FROM_FILE_LINE.match(line), line)\n \n elif self.ONE_LINE.match(line):\n self.one_line_match(self.ONE_LINE.match(line), line)\n \n elif self.SANDBOX_FILE_LINE.match(line):\n self.sandbox_file_line_match(self.SANDBOX_FILE_LINE.match(line), line)\n \n elif self.MULTI_LINE.match(line):\n self.multi_line_match(self.MULTI_LINE.match(line), line)\n \n elif self.COMMENT_LINE.match(line):\n self.dic['__comment'] += '\\n' + self.COMMENT_LINE.match(line).group('comment')\n \n elif not self.EMPTY_LINE.match(line):\n raise SyntaxErrorPL(self.path_parsed_file, line, self.lineno)",
"def read_from_line(cls, line):\n raise RuntimeError(\"%s feature can't be \"\n \"read from a line!\", str(cls))",
"def process_line():\n pass",
"def _parse_csv(cls, filepath):\n cls._filename = basename(filepath)\n with codecs.open(filepath, mode='rb', encoding='ascii') as fp:\n # Determine type of EVE CSV file and parse\n line1 = fp.readline()\n fp.seek(0)\n\n if line1.startswith(\"Date\"):\n return cls._parse_average_csv(fp)\n elif line1.startswith(\";\"):\n return cls._parse_level_0cs(fp)",
"def process_line(self, line):\n columns = line.split('|')\n\n if len(line) == 0 or len(columns) < 16:\n return None # empty line or malformed line\n\n cmte_id, name, zip_code = columns[0], columns[7], columns[10][:5]\n transaction_dt, transaction_amt = columns[13], columns[14]\n other_id = columns[15]\n\n if len(other_id) > 0 or len(transaction_amt) == 0 or len(cmte_id) == 0 or len(name) == 0 or len(zip_code) < 5:\n return None # malformed data fields, ignore this line\n transaction_date = string_to_date(transaction_dt)\n if transaction_date is None:\n return None # 'TRANSACTION_DT' is an invalid date\n\n try:\n if self.repeat_donor(name, zip_code, transaction_date.year):\n # this record is from a repeat donor in any prior calendar year\n amount = float(transaction_amt)\n key = RecipientZipYear(cmte_id, zip_code, transaction_date.year)\n if key not in self.running_percentile:\n self.running_percentile[key] = RunningPercentile(self.percentile)\n self.running_percentile[key].add(amount)\n return self.print_record(key)\n else:\n return None # this record is not from a repeat donor\n except:\n return None # exception may comes from malformed line, so just ignore this line",
"def test_import_csv_file_a_row(self):\n\n complete_data = parse_csv_file(self.test_file_path)\n self.assertEqual(self.expected_data, complete_data[0])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return true if we should fastskip converting a pa.Array. The _true_ reason for this function is to test whether an Array contains "Inf" or "NaN". A number-conversion library will parse those. But _this_ library is for Workbench, and Workbench doesn't support NaN/Inf. So this function helps us decide _not_ to autoconvert a column when the intent isn't perfectly clear. Assume `arr` is of type `utf8` or a dictionary of `utf8`. Assume there are no gaps hidden in null values in the buffer. (It's up to the caller to prove this.) | def _utf8_chunk_may_contain_inf_or_nan(chunk: pyarrow.Array) -> bool:
_, offsets_buf, data_buf = chunk.buffers()
offsets = array.array("i")
assert offsets.itemsize == 4
offsets.frombytes(offsets_buf)
if sys.byteorder != "little":
offsets.byteswap() # pyarrow is little-endian
offset0 = offsets[chunk.offset]
offsetN = offsets[chunk.offset + len(chunk)] # len(offsets) == 1 + len(chunk)
b = data_buf[offset0:offsetN].to_pybytes()
return SCARY_BYTE_REGEX.search(b) is not None | [
"def asarray_chkfinite(a):\n a = asarray(a)\n if (a.dtype.char in typecodes['AllFloat']) \\\n and (_nx.isnan(a).any() or _nx.isinf(a).any()):\n raise ValueError, \"array must not contain infs or NaNs\"\n return a",
"def contains_inf(arr, node=None, var=None):\n if not _is_numeric_value(arr, var):\n return False\n elif getattr(arr, \"dtype\", \"\") in discrete_dtypes:\n return False\n return np.isinf(np.nanmax(arr)) or np.isinf(np.nanmin(arr))",
"def pyarrow_array(arr, nan_to_null=False):\n import numpy as np\n import pyarrow as pa\n if nan_to_null and issubclass(arr.dtype.type,\n (np.floating, np.complexfloating)):\n isnan = np.isnan(arr)\n if isnan.any():\n pa_nul = pa.py_buffer(get_bitmap(isnan))\n return pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [pa_nul, pa.py_buffer(arr)])\n return pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [None, pa.py_buffer(arr)])",
"def is_nonnegative_arr(arr):\n out = True\n if not np.all(arr > -1e-6):\n print('some negative entries in ' + str(arr))\n out = False\n return out",
"def __where_not_nan(arr: np.ndarray):\n return np.where(np.isfinite(arr))",
"def is_float_array(val):\n return is_np_array(val) and issubclass(val.dtype.type, np.floating)",
"def nonans(array):\n return array[~np.isnan(array)]",
"def _isnan_check(array):\n return np.isnan(array).any()",
"def _is_double(arr):\n\n # Figure out which dtype for data\n if arr.dtype == np.float32:\n return False\n elif arr.dtype == np.float64:\n return True\n else:\n raise ValueError(\"Only float32 or float64 dtypes are supported\")",
"def _is_strictly_increasing(array: np.ndarray) -> bool:\n return (np.diff(array) > 0).all().astype(bool)",
"def isfillvalue(a):\n a = numpy.asarray(a)\n if a.dtype.kind == 'i':\n mask = a == -999999999\n elif a.dtype.kind == 'f':\n mask = numpy.isnan(a)\n elif a.dtype.kind == 'S':\n mask = a == ''\n else:\n raise ValueError('Fill value not known for dtype %s' % a.dtype)\n return mask",
"def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False",
"def is_float(arr, *args):\n return arr.dtype is np.dtype(np.float)",
"def is_positive_arr(arr):\n out = True\n if not np.all(arr > 0):\n print('some negative neg or zero entries in ' + str(arr))\n out = False\n return out",
"def _has_array(self) -> Optional[bool]:\n\n if self.__has_array is not None:\n return self.__has_array\n\n # False -ves (array length is 1) are an acceptable compromise to extensive checking\n\n # W --- 01:145038 34:092243 --:------ 1FC9 006 07230906368E\n # I --- 01:145038 --:------ 01:145038 1FC9 018 07000806368E-FC3B0006368E-071FC906368E\n # I --- 01:145038 --:------ 01:145038 1FC9 018 FA000806368E-FC3B0006368E-FA1FC906368E\n # I --- 34:092243 --:------ 34:092243 1FC9 030 0030C9896853-002309896853-001060896853-0010E0896853-001FC9896853\n if self.code == _1FC9:\n self.__has_array = self.verb != RQ # safe to treat all as array, even len=1\n\n elif self.verb != I_ or self.code not in CODES_WITH_ARRAYS:\n self.__has_array = False\n\n elif self.len == CODES_WITH_ARRAYS[self.code][0]: # NOTE: can be false -ves\n self.__has_array = False\n\n else:\n _len = CODES_WITH_ARRAYS[self.code][0]\n assert (\n self.len % _len == 0\n ), f\"{self} << array has length ({self.len}) that is not multiple of {_len}\"\n assert (\n self.src.type in (\"12\", \"22\") or self.src == self.dst\n ), f\"{self} << array is from a non-controller (01)\"\n assert (\n self.src.type not in (\"12\", \"22\") or self.dst == NON_DEV_ADDR\n ), f\"{self} << array is from a non-controller (02)\"\n self.__has_array = True\n\n # I --- 10:040239 01:223036 --:------ 0009 003 000000 # not array\n # I --- 01:102458 --:------ 01:102458 0009 006 FC01FF-F901FF\n # I --- 01:145038 --:------ 01:145038 0009 006 FC00FF-F900FF\n # I 034 --:------ --:------ 12:126457 2309 006 017EFF-027EFF\n # I --- 01:223036 --:------ 01:223036 000A 012 081001F40DAC-091001F40DAC # 2nd fragment\n # I 024 --:------ --:------ 12:126457 000A 012 010001F40BB8-020001F40BB8\n # I --- 02:044328 --:------ 02:044328 22C9 018 0001F40A2801-0101F40A2801-0201F40A2801\n # I --- 23:100224 --:------ 23:100224 2249 007 007EFF7EFFFFFF # can have 2 zones\n # I --- 02:044328 --:------ 02:044328 22C9 018 0001F40A2801-0101F40A2801-0201F40A2801\n # I --- 02:001107 --:------ 02:001107 3150 010 007A-017A-027A-036A-046A\n\n return self.__has_array",
"def spikes(arr):\n arr = np.array(arr)\n if (arr.size == 0) or flat(arr) or monotonic(arr):\n return False\n arr = normalize(arr)\n spikes = np.where(arr > arr.mean())[0]\n rest = np.ones_like(arr, dtype=bool)\n rest[spikes] = False\n return flat(arr[rest]) and flat(np.diff(arr[spikes]))",
"def __isZeroEverywhere(self, array):\n epsilon = numpy.finfo( type(array[0]) ).eps\n boolList = numpy.less_equal(numpy.abs(array), epsilon)\n\n for b in boolList:\n if not b:\n return False\n return True",
"def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)",
"def test_dtype_None(self):\n array = np.array([[0, 1, 2], [2, 1, 0]]).T\n self.assertTrue(to_ndarray(array, None, safe=True).flags.contiguous,\n msg='to_ndarray: Non contiguous arrays are not being consolidated when dtype is None')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the config information with new dropout values. | def update_dropout(info,
dropout,
dropout_type,
prop_name):
if dropout_type == "schnet_dropout":
info["model_params"]["schnet_dropout"] = dropout
elif dropout_type == "chemprop_dropout":
info["model_params"]["cp_dropout"] = dropout
elif dropout_type == "readout_dropout":
# if it's in the readout layers, find the dropout
# layers in the readout dictionary and update them
readout = info["model_params"]["readoutdict"]
layer_dics = readout[prop_name]
for layer_dic in layer_dics:
if layer_dic["name"] == "Dropout":
layer_dic["param"]["p"] = dropout
info["model_params"]["readoutdict"] = {prop_name: layer_dics}
elif dropout_type == "attention_dropout":
info["model_params"]["boltzmann_dict"]["dropout_rate"] = dropout
else:
info["model_params"][dropout_type] = dropout | [
"def conf_update(self):\n pass",
"def update(self):\n self.save_config_file()",
"def update_config(self, config):\n self.config = config\n self.rate_dropout = nn.Dropout(config.DROPOUT_RATES)\n self.pos_encoder.update_config(config)\n self.transformer_encoder.update_config(config)\n self.src_mask = {} # Clear cache",
"async def _update_config(self):\n if self.config['data'] is None or self.config_expired:\n data = await self.get_data(self.url_builder('configuration'))\n self.config = dict(data=data, last_update=datetime.now())",
"def _updateConfigWidgets(self):\n self._ui.configModelCoordinates_fieldChooser.setField(self._fitter.getModelCoordinatesField())\n self._ui.configDataCoordinates_fieldChooser.setField(self._fitter.getDataCoordinatesField())\n self._ui.configMarkerGroup_fieldChooser.setField(self._fitter.getMarkerGroup())",
"def changeDropout(self,dropout):\n self.dropout = dropout",
"def update_values(self, config, dest):\n for section in config.keys():\n if section in dest:\n for option in config[section].keys():\n if option in (\"desc\", \"outline\"):\n continue\n\n if option in dest[section]:\n dest[section][option][\"value\"] = config[section][option][\n \"value\"\n ]\n\n # else:\n # dest[section][option] = config[section][option]\n\n # else:\n # dest[section] = config[section]",
"def update_global_config(self, config, **kwargs):\n pass",
"def update_config(self, data):\n self.config.data = dict_merge(self.config.data, data)\n self.config.save()",
"def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)",
"def OnConfigChange(self, input):\n return",
"def image_config_update(self, new_variants, new_facets, new_mediators):\n\n if new_variants is not None:\n self.cfg.variants.update(new_variants)\n if new_facets is not None:\n self.cfg.facets = new_facets\n if new_mediators is not None:\n self.cfg.mediators = new_mediators\n self.save_config()",
"def update_sensor_options(val):\n sensor_drop.options = sensor_dict[spacecraft_drop.value]",
"def update_data(self):\n self.global_config.update()\n self.gl_opts_grid.foreach(self.gl_opts_grid.remove)\n self.build_rhvoice_conf_page(self.gl_opts_grid)",
"def update(self, other):\n self._config.update(other)",
"def update_config():\n config.update_config(config.usr_config_file, config.def_config_file)",
"def update_config(self, config):\n # Save a copy of the current config.\n newconfig = deepcopy(self.config)\n # Merge the new config into the current one.\n newconfig.merge(config)\n # Save the combined config as self.config, which triggers the traits\n # events.\n self.config = newconfig",
"def update_ranges(self):\n new_ranges = self.get_z_ranges()\n self.config.update_ranges(new_ranges)",
"def update_config(self):\n dwam_params = {\n 'genesis_color_sets': self.genesis_color_sets,\n 'color_set_states': self.color_set_states\n }\n self.config['dwam'] = dwam_params"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the config information with the number of attention heads. | def update_heads(info,
heads):
info["model_params"]["boltzmann_dict"]["num_heads"] = heads
# Concatenate the fingerprints produced by the different heads
info["model_params"]["boltzmann_dict"]["head_pool"] = "concatenate"
readoutdict = info["model_params"]["readoutdict"]
feat_dim = info["model_params"]["mol_basis"]
for key, lst in readoutdict.items():
for i, dic in enumerate(lst):
if "param" in dic and "in_features" in dic.get("param", {}):
# make sure that the input dimension to the readout is equal to
# `heads * feat_dim`, where `feat_dim` is the feature dimension
# produced by each head
readoutdict[key][i]["param"]["in_features"] = feat_dim * heads
break
info["model_params"]["readoutdict"] = readoutdict | [
"def increment_config_version(self):\n self.config_version += 1\n if self.config_version > MAX_CONFIG_VERSION:\n self.config_version = 1",
"def _make_attention(self):\n return self.config.attention_cls(\n num_heads=self.config.num_heads,\n dtype=self.config.dtype,\n qkv_features=self.config.qkv_dim,\n head_dim=self.config.head_dim,\n kernel_init=self.config.attention_kernel_init,\n bias_init=self.config.bias_init,\n use_bias=False,\n broadcast_dropout=True,\n rescale_logits=self.config.rescale_logits,\n dropout_rate=self.config.attention_dropout_rate,\n use_extra_logit=self.config.use_extra_logit,\n float32_logits=self.config.attention_float32_logits)",
"def conf_update(self):\n pass",
"def test_attention_net(self):\n\n # Checks that torch and tf embedding matrices are the same\n with tf1.Session().as_default() as sess:\n assert np.allclose(\n relative_position_embedding(20, 15).eval(session=sess),\n relative_position_embedding_torch(20, 15).numpy())\n\n # B is batch size\n B = 32\n # D_in is attention dim, L is memory_tau\n L, D_in, D_out = 2, 16, 2\n\n for fw, sess in framework_iterator(session=True):\n\n # Create a single attention layer with 2 heads\n if fw == \"torch\":\n # Create random Tensors to hold inputs and outputs\n x = torch.randn(B, L, D_in)\n y = torch.randn(B, L, D_out)\n\n value_labels = torch.randn(B, L, D_in)\n memory_labels = torch.randn(B, L, D_out)\n\n attention_net = TorchGTrXLNet(\n observation_space=gym.spaces.Box(\n low=float(\"-inf\"), high=float(\"inf\"), shape=(D_in, )),\n action_space=gym.spaces.Discrete(D_out),\n num_outputs=D_out,\n model_config={\"max_seq_len\": 2},\n name=\"TestTorchAttentionNet\",\n num_transformer_units=2,\n attn_dim=D_in,\n num_heads=2,\n memory_tau=L,\n head_dim=D_out,\n ff_hidden_dim=16,\n init_gate_bias=2.0)\n\n init_state = attention_net.get_initial_state()\n\n # Get initial state and add a batch dimension.\n init_state = [np.expand_dims(s, 0) for s in init_state]\n seq_lens_init = torch.full(\n size=(B, ), fill_value=L, dtype=torch.int32)\n\n # Torch implementation expects a formatted input_dict instead\n # of a numpy array as input.\n input_dict = {\"obs\": x}\n self.train_torch_full_model(\n attention_net,\n input_dict, [y, value_labels, memory_labels],\n num_epochs=250,\n state=init_state,\n seq_lens=seq_lens_init)\n # Framework is tensorflow or tensorflow-eager.\n else:\n x = np.random.random((B, L, D_in))\n y = np.random.random((B, L, D_out))\n\n value_labels = np.random.random((B, L, 1))\n memory_labels = np.random.random((B, L, D_in))\n\n # We need to create (N-1) MLP labels for N transformer units\n mlp_labels = np.random.random((B, L, D_in))\n\n attention_net = GTrXLNet(\n observation_space=gym.spaces.Box(\n low=float(\"-inf\"), high=float(\"inf\"), shape=(D_in, )),\n action_space=gym.spaces.Discrete(D_out),\n num_outputs=D_out,\n model_config={\"max_seq_len\": 2},\n name=\"TestTFAttentionNet\",\n num_transformer_units=2,\n attn_dim=D_in,\n num_heads=2,\n memory_tau=L,\n head_dim=D_out,\n ff_hidden_dim=16,\n init_gate_bias=2.0)\n model = attention_net.trxl_model\n\n # Get initial state and add a batch dimension.\n init_state = attention_net.get_initial_state()\n init_state = [np.tile(s, (B, 1, 1)) for s in init_state]\n\n self.train_tf_model(\n model, [x] + init_state,\n [y, value_labels, memory_labels, mlp_labels],\n num_epochs=200,\n minibatch_size=B)",
"def update_config(self, config):\n self.config = config\n self.rate_dropout = nn.Dropout(config.DROPOUT_RATES)\n self.pos_encoder.update_config(config)\n self.transformer_encoder.update_config(config)\n self.src_mask = {} # Clear cache",
"def update(self, config):\n self.n_topics = config['n_topics'] \n self.n_passes = config['n_passes'] \n self.min_docfreq = config['min_docfreq'] \n self.max_docfreq = config['max_docfreq']\n self.ngrams = config['ngrams'] \n self.n_words = config['n_words'] \n self.topic_range = config['topic_range'] \n self.ext_stop_words = config['ext_stop_words']",
"def update(self, rxn_probs):\n pass",
"def config_changed(self):\n self.config_version += 1\n self.driver.config_changed()",
"def test_multi_head_attention(self):\n # B is batch size\n B = 1\n # D_in is attention dim, L is memory_tau\n L, D_in, D_out = 2, 32, 10\n\n for fw, sess in framework_iterator(\n frameworks=(\"tfe\", \"torch\", \"tf\"), session=True):\n # Create a single attention layer with 2 heads.\n if fw == \"torch\":\n\n # Create random Tensors to hold inputs and outputs\n x = torch.randn(B, L, D_in)\n y = torch.randn(B, L, D_out)\n\n model = TorchMultiHeadAttention(\n in_dim=D_in, out_dim=D_out, num_heads=2, head_dim=32)\n\n self.train_torch_layer(model, x, y, num_epochs=500)\n\n # Framework is tensorflow or tensorflow-eager.\n else:\n x = np.random.random((B, L, D_in))\n y = np.random.random((B, L, D_out))\n\n inputs = tf.keras.layers.Input(shape=(L, D_in))\n\n model = tf.keras.Sequential([\n inputs,\n MultiHeadAttention(\n out_dim=D_out, num_heads=2, head_dim=32)\n ])\n self.train_tf_model(model, x, y)",
"def vqa_attention_base():\n hparams = common_hparams.basic_params1()\n hparams.batch_size = 2\n hparams.use_fixed_batch_size = True,\n hparams.optimizer = \"Adam\"\n hparams.optimizer_adam_beta1 = 0.9\n hparams.optimizer_adam_beta2 = 0.999\n hparams.optimizer_adam_epsilon = 1e-8\n hparams.weight_decay = 0\n hparams.clip_grad_norm = 0.\n hparams.initializer = \"uniform_unit_scaling\"\n hparams.initializer_gain = 2.\n hparams.learning_rate = 0.5\n hparams.learning_rate_schedule = \"legacy\"\n hparams.learning_rate_warmup_steps = 0\n hparams.learning_rate_decay_scheme = \"exp\"\n hparams.learning_rate_decay_rate = 0.5\n hparams.learning_rate_decay_steps = 50000\n\n # not used hparams\n hparams.label_smoothing = 0.\n hparams.multiply_embedding_mode = \"\"\n\n hparams.dropout = 0.5\n hparams.norm_type = \"layer\"\n hparams.layer_postprocess_sequence = \"nd\"\n hparams.layer_prepostprocess_dropout = 0.5\n\n # add new hparams\n # preprocess\n hparams.add_hparam(\"resize_side\", 512)\n hparams.add_hparam(\"height\", 448)\n hparams.add_hparam(\"width\", 448)\n hparams.add_hparam(\"distort\", True)\n\n hparams.add_hparam(\"train_resnet\", False)\n hparams.add_hparam(\"rnn_type\", \"lstm\")\n hparams.add_hparam(\"num_rnn_layers\", 1)\n hparams.add_hparam(\"max_question_length\", 15)\n # lstm hidden size\n hparams.hidden_size = 512\n\n hparams.add_hparam(\"attn_dim\", 512)\n hparams.add_hparam(\"num_glimps\", 2)\n\n hparams.add_hparam(\"num_mlp_layers\", 1)\n hparams.add_hparam(\"mlp_dim\", 1024)\n\n return hparams",
"def _InitAttentionParams(self, atten_tpl):\n p = self.params\n\n if isinstance(p.num_heads, list) != isinstance(atten_tpl, list):\n raise ValueError('p.num_heads and p.atten_tpl should both be lists '\n f'or both scalars for {p.name} num_heads={p.num_heads}.')\n if isinstance(p.num_heads, list) and (len(p.num_heads) != len(atten_tpl)):\n raise ValueError('num_heads and atten_tpl should both be lists '\n 'of the equal sizes: '\n f'{len(p.num_heads)} vs {len(atten_tpl)}')\n\n def _SetCommonParams(params, name, num_heads):\n # Raise warning if self.params override params from atten_tpl\n for key in ['input_dim', 'hidden_dim', 'num_heads', 'atten_dropout_prob']:\n if params.Get(key) is not p.Get(key):\n tf.logging.warning('attention param {} overriding: {} -> {}'.format(\n key, params.Get(key), p.Get(key)))\n if params.name is not name:\n tf.logging.warning('attention param name overriding: {} -> {}'.format(\n params.name, name))\n params.name = name\n params.input_dim = p.input_dim\n params.hidden_dim = p.hidden_dim\n params.num_heads = num_heads\n params.atten_dropout_prob = p.atten_dropout_prob\n if isinstance(p.num_heads, list):\n params.proj_tpl.make_output_proj_no_op = True\n # Each dim per head is now divided among all heads\n dim_per_head = p.hidden_dim // sum(p.num_heads)\n params.proj_tpl.dim_per_head = dim_per_head\n params.dim_per_head = dim_per_head\n params.hidden_dim = p.hidden_dim // len(p.num_heads)\n return params\n\n if isinstance(p.num_heads, list):\n params_list = []\n for i in range(len(atten_tpl)):\n params = atten_tpl[i].Copy()\n params = _SetCommonParams(params, 'mixed_atten_{}'.format(i),\n p.num_heads[i])\n params_list.append(params)\n params = params_list\n else:\n params = atten_tpl.Copy()\n params = _SetCommonParams(params, 'multihead_atten', p.num_heads)\n return params",
"def update_config(self, config):\n # Save a copy of the current config.\n newconfig = deepcopy(self.config)\n # Merge the new config into the current one.\n newconfig.merge(config)\n # Save the combined config as self.config, which triggers the traits\n # events.\n self.config = newconfig",
"def hw_config(self, n):\n self._program += super().hw_config(n)\n self._check_pc()\n return",
"def configure(self, game_config):\n self.num_players = game_config['game_num_players']",
"def update_counts(self) -> None:\n ...",
"def _on_config_msg(self, msg):\n self._config_updates.add(msg)",
"def n_configs(self):\n return self._faux._n_configs",
"def update_config(\n self,\n **spacy_training_config,\n ) -> None:\n self.config[\"training\"].update(spacy_training_config)",
"def updateSizeHead(self, size): \n self.avatarConfiguration[\"headSize\"] = size\n self.paintHead()\n self.paintHair()\n if (self.avatarConfiguration[\"mask\"]):\n self.generateMask(\"imgUpload.png\")\n self.paintMask()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update a general parameter that's in the main info dictionary. | def update_general(info, key, val):
info["model_params"][key] = val | [
"def update_parameter(self, param, val, force=False):\n self._update_dict[param] = val\n if force:\n self._cur_val[param] = None",
"def update_params(self):",
"def updateParam(self, name, value):\n params = self.params\n params[name]['value'] = value\n self.params = params",
"def update_parameter(self, name, freq, value):\n if name not in self._parameters.keys():\n self.add_parameter(name, [freq], [value])\n else:\n param = self.get_parameter(name)\n param.update_value(freq, value)",
"def update_param(self, update_param):\n\n self._update_param = update_param",
"def update_algo_parameter(self, parameter_name, new_parameter_value):\n self.algo.update_algo_parameter(parameter_name, new_parameter_value)",
"def update_param_info(param_info, config, is_user_config=False):\n if 'parameters' not in config:\n return\n params = config['parameters']\n for name in params:\n val = params[name]\n if not is_user_config:\n # If this is not a user-provided configuration, we disallow parameter redefinition.\n if name in param_info:\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter redefinition is not allowed for non-user configuration.\"\n \" This is a system configuration error that must not happen.\"\n \" Parameter %s=%s, new parameter definition (value) is %s\" % (name, str(param_info[name]), val)\n )\n if isinstance(val, dict):\n # This is a complete parameter definition with name, value and description.\n if 'val' not in val:\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter that is defined by a dictionary must contain 'val' field that\"\n \" defines its default value. Found this definition: %s=%s\" % (name, val)\n )\n if name not in param_info:\n param_info[name] = copy.deepcopy(val) # New parameter, set it info object.\n # TODO what about parameter type and description?\n else:\n logging.warn(\n \" Parameter (%s) entirely redefines existing parameter (%s).\"\n \" Normally, only value needs to be provided.\"\n \" We will proceed but you may want to fix this.\",\n json.dumps(val),\n json.dumps(param_info[name])\n )\n param_info[name]['val'] = val['val'] # Existing parameter from user configuration, update its value\n else:\n # Just parameter value\n val_type = 'str' if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__\n if name not in param_info:\n param_info[name] = {\n 'val': val,\n 'type': val_type,\n 'desc': \"No description for this parameter provided (it was automatically converted from its value).\"\n }\n else:\n param_info[name]['val'] = val\n # Do final validations\n if 'type' in param_info[name] and param_info[name]['type'] not in ('int', 'str', 'float', 'bool'):\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter has invalid type = '%s'.\"\n \" Parameter definition is %s = %s\" % (param_info[name]['type'], name, param_info[name])\n )\n if 'type' not in param_info[name] or 'desc' not in param_info[name]:\n logging.warn(\n \"Parameter definition does not contain type ('type') and/or description ('desc').\"\n \" You should fix this. Parameter definition is\"\n \" %s = %s\", name, param_info[name]\n )",
"def UpdateParameters(self, param):\n\n for i, attribute in enumerate(self._fit_key.keys()):\n if attribute in param.keys():\n # Set attribute according to if it is a range or not\n if ';' in self._fit_key[attribute]:\n varmin = float(min(self._fit_key[attribute].split(';')))\n varmax = float(max(self._fit_key[attribute].split(';')))\n var = ROOT.RooRealVar(\n attribute,\n attribute,\n varmin,\n varmax)\n param[attribute] = var\n else:\n param[attribute] = float(self._fit_key[attribute])\n\n info(\n 'Change default value of {} (= {}) for signal PDF'\n .format(attribute, self._fit_key[attribute]))",
"def update_params(self, d):\n for k, v in d.items():\n if k in self[\"parameters\"]:\n self[\"parameters\"][k].update(v)",
"def updateParameters(self, parameters):\r\n super(Tool, self).updateParameters(parameters)",
"def _update_params(self):\n pass",
"def updateParamAnypoint(self):\n self.parent.ui.lineEdit_beta.setText(\"%.2f\" % self.parent.beta)\n self.parent.ui.lineEdit_alpha.setText(\"%.2f\" % self.parent.alpha)\n self.parent.ui.spinBox_zoom.setValue(self.parent.zoom)",
"def _update_parameter_map(self):\n\n self._position_to_param = []\n self._mw_kwargs = {}\n for p in self._mw_fit_parameters.keys():\n if self._mw_fit_parameters[p].fixed:\n self._mw_kwargs[p] = self._mw_fit_parameters[p].value\n else:\n self._mw_kwargs[p] = None\n self._position_to_param.append(p)\n\n self._mw_kwargs.update(self._mw_other_arguments)",
"def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()",
"def update_values(self, to_update):\n for key, value in kwargs.iteritems():\n self.params[key] = value\n # update the possibly dependent parameters\n self.set_filenames()",
"def edit_parameter(request, parameter, **_kwargs):\n pass",
"def update_params(self):\r\n parameters = dict()\r\n # Take the first value for all parameters\r\n for key, value in self.total_params.items():\r\n parameters[key] = value[0]\r\n # Update model\r\n self.model = self.inst(random_state=RANDOM_SEED, **parameters)",
"def post_parameter_update(self) -> None:",
"def update_parameter(self, name: str, value: tp.Any) -> None:\n\n assert self._scope_params is not None\n\n if name not in self._scope_params:\n raise ValueError(f\"types.Parameter {name} not found in {self}.\")\n\n if self.is_initializing():\n return\n\n parameter = self._scope_params[name]\n assert isinstance(parameter, types.Parameter)\n parameter.value = value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct generalized extreme value distribution. The parameters `loc`, `scale`, and `concentration` must be shaped in a way that supports broadcasting (e.g. `loc + scale + concentration` is valid). | def __init__(self,
loc,
scale,
concentration,
validate_args=False,
allow_nan_stats=True,
name='GeneralizedExtremeValue'):
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([loc, scale, concentration],
dtype_hint=tf.float32)
loc = tensor_util.convert_nonref_to_tensor(
loc, name='loc', dtype=dtype)
scale = tensor_util.convert_nonref_to_tensor(
scale, name='scale', dtype=dtype)
concentration = tensor_util.convert_nonref_to_tensor(
concentration, name='concentration', dtype=dtype)
dtype_util.assert_same_float_dtype([loc, scale, concentration])
# Positive scale is asserted by the incorporated GEV bijector.
self._gev_bijector = gev_cdf_bijector.GeneralizedExtremeValueCDF(
loc=loc, scale=scale, concentration=concentration,
validate_args=validate_args)
# Because the uniform sampler generates samples in `[0, 1)` this would
# cause samples to lie in `(inf, -inf]` instead of `(inf, -inf)`. To fix
# this, we use `np.finfo(dtype_util.as_numpy_dtype(self.dtype).tiny`
# because it is the smallest, positive, 'normal' number.
super(GeneralizedExtremeValue, self).__init__(
distribution=uniform.Uniform(
low=np.finfo(dtype_util.as_numpy_dtype(dtype)).tiny,
high=tf.ones([], dtype=dtype),
allow_nan_stats=allow_nan_stats),
# The GEV bijector encodes the CDF function as the forward,
# and hence needs to be inverted.
bijector=invert_bijector.Invert(
self._gev_bijector, validate_args=validate_args),
parameters=parameters,
name=name) | [
"def gaussian(mu, wid, x):\n return np.exp(-((x - mu) / (0.6005612 * wid))**2)",
"def func_full_exp(x, c1, c2, c3, c4, c5, c6, c7):\n x = np.power(10, x)\n thermalCore = c1 * np.sqrt(x) * np.exp(-c2 * x)\n a = map(lambda y: 0 if y < c5 else 1, x)\n b = map(lambda y: 0 if y < c6 else 1, x)\n #b1 = map(lambda y: 1 - y, b)\n a = np.array(a)\n b = np.array(b)\n b1 = 1.0 - b\n powerLaw = c3 * a * np.power(x, -c4) * (b1 + b * np.exp(-c7 * (x - c6)))\n #print thermalCore + powerLaw\n return np.log10(thermalCore + powerLaw)",
"def gaussian(x,amp,cen,wid):\n \n z = (x-cen)/wid \n return amp*np.exp(-z**2/2.)",
"def _gaussian(self, c, sigma):\n d = 2*pi*sigma*sigma\n ax = exp(-power(self._neigx-c[0], 2)/d)\n ay = exp(-power(self._neigy-c[1], 2)/d)\n return outer(ax, ay) # the external product gives a matrix",
"def _gaussian(self, c, sigma):\n d = 2*sigma*sigma\n ax = exp(-power(self._xx-self._xx.T[c], 2)/d)\n ay = exp(-power(self._yy-self._yy.T[c], 2)/d)\n return (ax * ay).T # the external product gives a matrix",
"def make_supercell(coords, lattice, size, min_size=-5) -> np.ndarray:\n a, b, c = lattice\n\n xyz_periodic_copies = []\n xyz_periodic_copies.append(coords)\n min_range = -3 # we aren't going in the minimum direction too much, so can make this small\n max_range = 20 # make this large enough, but can modify if wanting an even larger cell\n\n for x in range(-min_range, max_range):\n for y in range(0, max_range):\n for z in range(0, max_range):\n if x == y == z == 0:\n continue\n add_vector = x*a + y*b + z*c\n xyz_periodic_copies.append(coords + add_vector)\n\n # Combine into one array\n xyz_periodic_total = np.vstack(xyz_periodic_copies)\n\n # Filter out all atoms outside of the cubic box\n new_cell = xyz_periodic_total[np.max(xyz_periodic_total[:,:3], axis=1) < size]\n new_cell = new_cell[np.min(new_cell[:,:3], axis=1) > min_size]\n\n return new_cell",
"def do_extreme(self,magnitude='magnitude',tp_optional='tp_optional',direction_optional='direction_optional',tm_optional='tm_optional',water_depth_optional='water_depth_optional',\\\n args={'Fitting distribution':{'Weibull':True,'Gumbel':False,'GPD':False,'GEV':False},\n 'Method':{'pkd':False,'pwm':False,'mom':False,'ml':True},\n 'Slope fitting distribution':{'Weibull':True,'Gumbel':False},\n 'Slope treshold':0.005,\n 'Return period':[1,10,25,50,100],\n 'Estimate Hmax & Cmax RPVs':{'On':False,'Off':True},\n 'threshold type':{'percentile':True,'value':False},\n 'threshold value':95.0,\n 'Directional':{'On':True,'Off':False},\n 'Minimum number of peaks over threshold': 30,\n 'Minimum time interval between peaks (h)':24.0,\n 'Direction binning':{'centered':True,'not-centered':False},\n 'Direction interval': 45.,\n 'Time blocking':{'Annual':True,'Seasonal (South hemisphere)':False,'Seasonal (North hemisphere)':False,'Monthly':False},\n 'Display peaks':{'On':True,'Off':False},\n 'Display CDFs':{'On':True,'Off':False},\n 'Water depth':5000.0,\n 'folder out':os.getcwd()\n }):\n\n display_message()",
"def getEllipsoidGaussian(x, y, z, c, v1, sigma1, v2, sigma2, v3, sigma3):\n c = np.array(c, dtype=float).flatten()\n v1 = np.array(v1, dtype=float).flatten()\n v1 /= np.sqrt(np.sum(v1**2))\n v2 = np.array(v2, dtype=float).flatten()\n v2 /= np.sqrt(np.sum(v2**2))\n v3 = np.array(v3, dtype=float).flatten()\n v3 /= np.sqrt(np.sum(v3**2))\n X = np.array([x, y, z]).T\n print(X.shape)\n X -= c[None, :]\n d1 = (X.dot(v1))**2/(sigma1**2)\n d2 = (X.dot(v2))**2/(sigma2**2)\n d3 = (X.dot(v3))**2/(sigma3**2)\n return np.exp(-d1)*np.exp(-d2)*np.exp(-d3)",
"def eq_xscale(trng_dset, trgt_dset):\n pdb.set_trace()\n temp1 = add_x(trng_dset, start=1)\n temp2 = add_x(trgt_dset, start=1)\n a = temp1.shape[0]\n b = temp2.shape[0]\n lcm = a * b / fractions.gcd(a, b)\n x = lcm / a\n y = lcm / b\n temp1[:, 0] = temp1[:, 0].dot(x)\n temp2[:, 0] = temp2[:, 0].dot(y)\n output = interpolate(temp1), interpolate(temp2)\n return np.asarray(output)",
"def InelasticThreshold(Ecut, Ethreshold, xsAtEcut):\n print(Ecut, Ethreshold, xsAtEcut)\n __the_const = xsAtEcut / math.sqrt(Ecut - Ethreshold)\n\n def __sqrtThingee(E, *args):\n if E < Ethreshold:\n return 0.0\n else:\n return __the_const * math.sqrt(E - Ethreshold)\n __this_grid = copy.copy(Egrid) + [Ecut, Ethreshold]\n __this_grid.sort()\n return XYs1d.XYs1d.createFromFunction(\n defaultAxes(xName='E', xUnit='eV', yName='Sigma(E)', yUnit='b'),\n Xs=__this_grid,\n func=__sqrtThingee,\n parameters=[],\n accuracy=accuracy,\n biSectionMax=20,\n checkForRoots=False,\n infill=1,\n safeDivide=1)",
"def min(self, e, extra_constraints=(), signed=False, exact=None):\n raise NotImplementedError()",
"def min_max_scale(X, full_data_range, scale_range):\n\n # Set range vars for proper scaling among all protein coordinate data\n full_data_min, full_data_max = full_data_range\n scale_min, scale_max = scale_range\n\n # Scale coordinate data based on scaling variables defined\n X_std = (X - full_data_min) / (full_data_max - full_data_min)\n X_scaled = X_std * (scale_max - scale_min) + scale_min\n\n return X_scaled",
"def __new__(cls, minx, miny, minz, maxx, maxy, maxz):\n # Coerce bounds to floats, and nones to infs\n kwargs = locals()\n for b, inf in zip(('min', 'max'),\n (-np.inf, np.inf)):\n for axis in 'xyz':\n bound = b + axis\n value = kwargs[bound]\n kwargs[bound] = inf if value is None else float(value)\n \n kwargs.pop('cls') # must be passed positionally\n return super(cls, cls).__new__(cls, **kwargs)",
"def bigaussian(mu, wid, x, m = 0.5):\n lx = x.shape[0]\n ix = np.where(x == mu)[0][0]\n \n y = np.ones(lx)\n y[0:ix] = gaussian(mu, wid * m, x[0:ix])\n y[ix+1:lx] = gaussian(mu, wid * (1 - m), x[ix+1:lx]) \n \n return y",
"def __init__(self,\n loc,\n concentration,\n validate_args=False,\n allow_nan_stats=True,\n name=\"InverseGaussian\"):\n parameters = dict(locals())\n with tf.name_scope(name, values=[loc, concentration]):\n self._loc = tf.convert_to_tensor(loc, name=\"loc\")\n self._concentration = tf.convert_to_tensor(concentration,\n name=\"concentration\")\n with tf.control_dependencies([\n tf.assert_positive(self._loc),\n tf.assert_positive(self._concentration)] if validate_args else []):\n self._loc = tf.identity(self._loc, name=\"loc\")\n self._concentration = tf.identity(self._concentration,\n name=\"concentration\")\n tf.assert_same_float_dtype([self._loc, self._concentration])\n super(InverseGaussian, self).__init__(\n dtype=self._loc.dtype,\n reparameterization_type=tf.distributions.NOT_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n graph_parents=[self._loc, self._concentration],\n name=name)",
"def ecoMinMaxConsts(model, gen_dict, prod_vars, on_vars, reserve_vars):\n #No on_vars for nonstandard generation.\n for name, iHr in on_vars.keys():\n reserve = grb.quicksum(reserve_vars[name, iHr, type] \n for type in generator.GenUnit.RESERVE_PRODUCTS)\n if gen_dict[name].eco_max[iHr] < TOL:\n model.addConstr(on_vars[name, iHr] == 0 )\n model.addConstr(prod_vars[name, iHr] == 0 )\n model.addConstr(reserve == 0)\n continue\n model.addConstr( on_vars[name, iHr] * gen_dict[name].eco_min[iHr] <=\n prod_vars[name, iHr] + reserve, \n name=\"EcoMin\" + name + \"H\" + str(iHr) ) \n model.addConstr( prod_vars[name, iHr] + reserve <= \n on_vars[name, iHr] * gen_dict[name].eco_max[iHr], \n name=\"Ecomax\" + name + \"H\" + str(iHr) )\n #you need to be on in order to offer spinning reserve\n if (name, iHr, \"TMSR_Cap\") in reserve_vars:\n model.addConstr( reserve_vars[name, iHr, \"TMSR_Cap\"] <= \n on_vars[name, iHr] * gen_dict[name].TMSR_Cap, \n name=\"SpinningReserve\" + \"name\" + str(iHr) )",
"def gaussian(dims: Tuple[int, int], cutoff_freq: float) -> np.ndarray:\n # create grid\n m, n = [(dim - 1) / 2 for dim in dims]\n yy, xx = np.ogrid[-m : m + 1, -n : n + 1]\n\n # compute transfer function\n tf = np.exp(-(np.power(xx, 2) + np.power(yy, 2)) / (2 * np.power(cutoff_freq, 2)))\n\n # normalize and return transfer func\n return (tf - np.max(tf)) / (np.max(tf) - np.min(tf))",
"def make_uniform_x(self, x_resolution, min_x = None, max_x = None, bin_above = 2.0, **kwargs):\n \n if min_x is None or max_x is None:\n a, b = self.get_min_max_x(**kwargs)\n if min_x is None:\n min_x = a\n if max_x is None:\n max_x = b\n \n new_x = numpy.arange(min_x, max_x + x_resolution / 2, x_resolution)\n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n resolution = (numpy.amax(m.x) - numpy.amin(m.x)) / len(m.x)",
"def func_full(x, c1, c2, c3, c4, c5, c6, c7):\n thermalCore = c1 * np.sqrt(x) * np.exp(-c2 * x)\n a = map(lambda y: 0 if y < c5 else 1, x)\n b = map(lambda y: 0 if y < c6 else 1, x)\n #b1 = map(lambda y: 1 - y, b)\n a = np.array(a)\n b = np.array(b)\n b1 = 1.0 - b\n #powerLaw = c3 * a * np.power(x, -c4) * (b1 + b * np.exp(-c7*(x-c6)))\n powerLaw = 0.001 * a * np.power(x, -c4) * b1\n return thermalCore + powerLaw"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct Artillery YAML configuration | def set_yaml_config(self) -> None:
# LT-248: We can pick Artillery Phase configuration from conf file
self.yaml_config = {
"config": {
"target": self.get_swagger_url(),
"processor": f"./{self.OUT_FILE}",
"phases": [
{
"duration": settings.DURATION or 1,
"arrivalRate": settings.SPAWN_RATE or 1
}
]
},
"scenarios": self.task_set.yaml_flow
} | [
"def setupFromYml(self, yml):",
"def __build_yaml(self):\n \n with open(self.mainConfigFile, \"r\") as f:\n self.configFiles = yaml.safe_load(f)\n\n self.yamlStream = \"# \" + self.find_file(self.configFiles['head'])+'\\n'\n with open(self.find_file(self.configFiles['head']), \"r\") as f:\n self.yamlStream = self.yamlStream + f.read() + '\\n'\n\n if 'definitions' in self.configFiles.keys():\n self.__append_yaml(self.configFiles['definitions'])\n\n if 'aircrafts' in self.configFiles.keys():\n self.yamlStream = self.yamlStream + \"aircrafts:\\n\"\n for filename in self.configFiles['aircrafts']:\n self.yamStream = self.yamlStream + ' - '\n self.__append_yaml(filename, prefix=' ')\n self.yamlStream = self.yamlStream + '\\n'",
"def _make_builder_configs():\n release1 = TedliumReleaseConfig(\n name=\"release1\",\n description=\"\"\"\\\n The TED-LIUM corpus is English-language TED talks, with transcriptions,\n sampled at 16kHz. It contains about 118 hours of speech.\n\n This is the TED-LIUM corpus release 1,\n licensed under Creative Commons BY-NC-ND 3.0\n (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).\n \"\"\",\n citation=\"\"\"\\\n @inproceedings{rousseau2012tedlium,\n title={TED-LIUM: an Automatic Speech Recognition dedicated corpus},\n author={Rousseau, Anthony and Del{\\\\'e}glise, Paul and Est{\\\\`e}ve, Yannick},\n booktitle={Conference on Language Resources and Evaluation (LREC)},\n pages={125--129},\n year={2012}\n }\n \"\"\",\n url=\"https://www.openslr.org/7/\",\n download_url=\"http://www.openslr.org/resources/7/TEDLIUM_release1.tar.gz\",\n split_paths=[\n (tfds.Split.TRAIN, os.path.join(\"TEDLIUM_release1\", \"train\")),\n (tfds.Split.VALIDATION, os.path.join(\"TEDLIUM_release1\", \"dev\")),\n (tfds.Split.TEST, os.path.join(\"TEDLIUM_release1\", \"test\")),\n ],\n )\n\n release2 = TedliumReleaseConfig(\n name=\"release2\",\n description=\"\"\"\\\n This is the TED-LIUM corpus release 2,\n licensed under Creative Commons BY-NC-ND 3.0\n (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).\n\n All talks and text are property of TED Conferences LLC.\n\n The TED-LIUM corpus was made from audio talks and their transcriptions\n available on the TED website. We have prepared and filtered these data\n in order to train acoustic models to participate to the International\n Workshop on Spoken Language Translation 2011 (the LIUM English/French\n SLT system reached the first rank in the SLT task).\n\n Contains 1495 talks and transcripts.\n \"\"\",\n citation=\"\"\"\\\n @inproceedings{rousseau2014tedlium2,\n title={Enhancing the {TED-LIUM} Corpus with Selected Data for Language Modeling and More {TED} Talks},\n author={Rousseau, Anthony and Del{\\\\'e}glise, Paul and Est{\\\\`e}ve, Yannick},\n booktitle={Conference on Language Resources and Evaluation (LREC)},\n year={2014}\n }\n \"\"\",\n url=\"https://www.openslr.org/19/\",\n download_url=(\n \"http://www.openslr.org/resources/19/TEDLIUM_release2.tar.gz\"\n ),\n split_paths=[\n (tfds.Split.TRAIN, os.path.join(\"TEDLIUM_release2\", \"train\")),\n (tfds.Split.VALIDATION, os.path.join(\"TEDLIUM_release2\", \"dev\")),\n (tfds.Split.TEST, os.path.join(\"TEDLIUM_release2\", \"test\")),\n ],\n )\n\n release3 = TedliumReleaseConfig(\n name=\"release3\",\n description=\"\"\"\\\n This is the TED-LIUM corpus release 3, licensed under Creative Commons\n BY-NC-ND 3.0.\n\n All talks and text are property of TED Conferences LLC.\n\n This new TED-LIUM release was made through a collaboration between the\n Ubiqus company and the LIUM (University of Le Mans, France)\n\n Contents:\n\n - 2351 audio talks in NIST sphere format (SPH), including talks from\n TED-LIUM 2: be careful, same talks but not same audio files (only\n these audio file must be used with the TED-LIUM 3 STM files)\n - 452 hours of audio\n - 2351 aligned automatic transcripts in STM format\n - TEDLIUM 2 dev and test data: 19 TED talks in SPH format with\n corresponding manual transcriptions (cf. 
'legacy' distribution below).\n - Dictionary with pronunciations (159848 entries), same file as the one\n included in TED-LIUM 2\n - Selected monolingual data for language modeling from WMT12 publicly\n available corpora: these files come from the TED-LIUM 2 release, but\n have been modified to get a tokenization more relevant for English\n language\n\n Two corpus distributions:\n - the legacy one, on which the dev and test datasets are the same as in\n TED-LIUM 2 (and TED-LIUM 1).\n - the 'speaker adaptation' one, especially designed for experiments on\n speaker adaptation.\n \"\"\",\n citation=\"\"\"\\\n @inproceedings{hernandez2018tedlium3,\n title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},\n author={Hernandez, Fran{\\\\c{c}}ois and Nguyen, Vincent and Ghannay, Sahar and Tomashenko, Natalia and Est{\\\\`e}ve, Yannick},\n booktitle={International Conference on Speech and Computer},\n pages={198--208},\n year={2018},\n organization={Springer}\n }\n \"\"\",\n url=\"https://www.openslr.org/51/\",\n download_url=\"http://www.openslr.org/resources/51/TEDLIUM_release-3.tgz\",\n split_paths=[\n (\n tfds.Split.VALIDATION,\n os.path.join(\"TEDLIUM_release-3\", \"legacy\", \"dev\"),\n ),\n (\n tfds.Split.TEST,\n os.path.join(\"TEDLIUM_release-3\", \"legacy\", \"test\"),\n ),\n # The legacy/train directory contains symlinks to \"data\",\n # which are skipped by extraction (see above).\n # Work around this by manually dereferencing the links here.\n (tfds.Split.TRAIN, os.path.join(\"TEDLIUM_release-3\", \"data\")),\n ],\n )\n\n return [release1, release2, release3]",
"def yamlConfigForParsingPlugins():\n parameters = \"\"\"\njoinPaths: !joinPaths\n - a\n - b\n - \"c\"\nrunPageTemplates: !findRunPageTemplates\n - \"templates\"\nbcrypt: !bcrypt\n bcryptLogRounds: 12\n user: \"pass\"\nbcryptNoUser: !bcrypt\n bcryptLogRounds: 12\n null: null\nsecretKey: !secretKey 12345\nsecretKeyGen: !secretKey null\n \"\"\"\n # Load parameters\n parameters = yaml.load(parameters, Loader = yaml.SafeLoader)\n return parameters",
"def create_yaml(self):\n if self._language == PYTHON:\n language_str = 'python'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._python_dependencies()\n elif self._language == NODE:\n language_str = 'node'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._node_dependencies()\n elif self._language == DOTNET:\n language_str = 'dotnet'\n package_route = '$(System.DefaultWorkingDirectory)/publish_output/s'\n dependencies = self._dotnet_dependencies()\n elif self._language == POWERSHELL:\n language_str = 'powershell'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._powershell_dependencies()\n else:\n raise LanguageNotSupportException(self._language)\n\n if self._app_type == WINDOWS:\n platform_str = 'windows'\n yaml = self._generate_yaml(dependencies, 'VS2017-Win2016', language_str, platform_str, package_route)\n else:\n platform_str = 'linux'\n yaml = self._generate_yaml(dependencies, 'ubuntu-16.04', language_str, platform_str, package_route)\n\n with open('azure-pipelines.yml', 'w') as f:\n f.write(yaml)",
"def setup_yaml_parser():\n var = re.compile(r\".*\\$\\{.*\\}.*\", re.VERBOSE)\n yaml.add_constructor('!env_var', _env_var_constructor)\n yaml.add_implicit_resolver('!env_var', var)",
"def generate_config():\n\n return {\n \"email_subject\": DEFAULT_EMAIL_SUBJECT,\n \"from_email\": DEFAULT_FROM_EMAIL,\n \"to_email\": DEFAULT_TO_EMAIL,\n \"url\": DEFAULT_URL,\n \"start_value\": DEFAULT_START_VALUE,\n \"look_ahead\": DEFAULT_LOOK_AHEAD,\n \"slide_window\": DEFAULT_SLIDE_WINDOW,\n }",
"def build_configs():",
"def user_create_yaml(self):\n pass",
"def config_enclosure() -> dict:\n with open(get_test_file_path('pygeoapi-test-config-enclosure.yml')) as fh:\n return yaml_load(fh)",
"def _generate_configs(self):\n return _generate_anchor_configs(self.min_level, self.max_level,\n self.num_scales, self.aspect_ratios)",
"def get_yaml_editor(**kwargs: Any) -> YAML:\r\n explicit_start = kwargs.pop(\"explicit_start\", True)\r\n explode_aliases = kwargs.pop(\"explode_aliases\", False)\r\n preserve_quotes = kwargs.pop(\"preserve_quotes\", True)\r\n\r\n # The ruamel.yaml class appears to be missing some typing data, so\r\n # these valid assignments cannot be type-checked.\r\n yaml = YAML()\r\n\r\n yaml.indent(mapping=2, sequence=4, offset=2)\r\n yaml.explicit_start = explicit_start # type: ignore\r\n yaml.preserve_quotes = preserve_quotes # type: ignore\r\n yaml.width = maxsize # type: ignore\r\n\r\n if explode_aliases:\r\n yaml.default_flow_style = False\r\n\r\n return yaml",
"def get_configured_yaml() -> ModuleType:\n import yaml\n\n from manubot.cite.csl_item import CSL_Item\n\n yaml.add_representer(str, _yaml_str_representer)\n # CSL_Item: pyyaml chokes on dict subclass\n # https://github.com/yaml/pyyaml/issues/142\n # https://stackoverflow.com/a/50181505/4651668\n yaml.add_representer(\n CSL_Item,\n lambda dumper, data: dumper.represent_mapping(\n tag=\"tag:yaml.org,2002:map\", mapping=data.items()\n ),\n )\n return yaml",
"def _create_yaml_map(self):",
"def celery_config() -> Dict:\n with open(script_dir + 'config.yml', 'r') as yamlfile:\n cfg = yaml.load(yamlfile, Loader=yaml.SafeLoader)\n celery_cfg = cfg['celery']\n result = {\n 'main': celery_cfg['main'],\n 'broker': celery_cfg['broker_url'],\n 'backend': celery_cfg['backend_url'],\n }\n return result",
"def config_maker(args):\n\n dico = dict()\n dico['fastq'] = args.fastq\n dico['path_fast5'] = args.fast5\n if args.summary:\n dico['summary'] = args.summary\n else:\n dico['summary'] = False\n dico['splitted'] = args.splitted\n dico['reference'] = args.reference\n if args.out:\n dico['output'] = args.out\n\n return dico",
"def init_yaml(self):\n f = open(\"content/\"+self.match_name+'.md',\"r\")\n text = f.read().split('---')[0]\n self.yaml = yaml.load(text, Loader=Loader)",
"def _parse_yaml_configs(args, anon_component_prefix=\"anon_app\"):\n # Configuration files are basically nested dictionaries and the command-line arguments\n # are a list with each element being a dictionary. If the dict in the args has the key\n # 'class', then it is anonymous and we should just give it a sequential unique name to\n # ensure it is run. If, however, it does not, then we should assume that it's a NAMED\n # configuration and so we can actually use that to overwrite/modify the configurations\n # pulled in from a file.\n\n new_configs = {}\n for arg in args:\n try:\n arg = yaml.load(arg)\n except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:\n raise ValueError(\"error parsing manual configuration: %s\\nError:%s\" % (arg, e))\n\n # If this config is anonymous, give it a unique name and add it to configs\n # since it couldn't possibly overwrite another config entry.\n # NOTE: if user specified a 'name' entry directly, we will still take that later on...\n if 'class' in arg:\n # TODO: perhaps register these names somewhere to ensure uniqueness?\n global __scale_client_n_anon_apps_added__\n unique_key = anon_component_prefix + str(__scale_client_n_anon_apps_added__)\n __scale_client_n_anon_apps_added__ += 1\n new_configs[unique_key] = arg\n else:\n try:\n new_configs.update(arg)\n except TypeError as e:\n raise ValueError(\"error in your manual configuration: %s\\n\"\n \"couldn't be interpreted as a dict due to error: %s\" % (arg, e))\n\n return new_configs",
"def devpiserver_genconfig(tw, config, argv, writer):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tell if a person is allergic to the given allergen. | def is_allergic_to(self, allergen):
return allergen in self.list | [
"def is_allergen(self, is_allergen):\n\n self._is_allergen = is_allergen",
"def is_girl(self):\n if self.gneder == self.GIRL: return True;",
"def in_garden(obj):\n print(\"Searching the garden's random objects\")\n return obj in _random_objects",
"def isrelatierekening(self, rekening):\n if type(rekening) == str or type(rekening) == unicode:\n return rekening in self.ledenrek or rekening in self.olvrek or rekening in self.externrek\n return rekening.naam in self.ledenrek or rekening.naam in self.olvrek or rekening.naam in self.externrek",
"def update_certain_knowledge(self):\n NeedToCheck = True\n while NeedToCheck:\n NeedToCheck = False\n\n # look through each allergen_possibility, ignoring anything in known_allergens\n for this_allergen in [\n a\n for a in self.allergen_possibilities.keys()\n if a not in self.known_allergens\n ]:\n possibility_count = len(self.allergen_possibilities[this_allergen])\n if 1 == possibility_count:\n the_definite_ingredient = list(\n self.allergen_possibilities[this_allergen]\n )[0]\n # ok, let's go boys.. we've got a new fact!\n print(\n f\"We now know that ingredient {the_definite_ingredient} has the allergen {this_allergen}\"\n )\n # we can add this to known_allergens list\n self.known_allergens[this_allergen] = the_definite_ingredient\n\n # we must remove this ingredient as an option from all the other allergens in the known universe\n for target_allergen in self.allergen_possibilities:\n if this_allergen != target_allergen:\n if (\n the_definite_ingredient\n in self.allergen_possibilities[target_allergen]\n ):\n self.allergen_possibilities[target_allergen].remove(\n the_definite_ingredient\n )\n # and we need to go round again..\n NeedToCheck = True",
"def add(self, alergen: Alergen) -> bool:\n pass",
"def eligiblePresident(age,bornInHomeland):\n return (age>=35) and bornInHomeland",
"def is_ligand(self):\n if any(LigandComponentAdaptor().fetch_by_residue_id(r.residue_id) for r in self.Residues):\n return True\n else:\n return False",
"def lent_out(self):\n return self in Book.on_loan",
"def is_fullgen(nom, gen, n=2): \n if nom[:n] == gen[:n]:\n return True\n else:\n if gen in ('oris'):\n return True\n else:\n return False",
"def _bot_assigned_bell(self, bell: Bell) -> bool:\n return self._tower.is_bell_assigned_to(bell, self._user_name)",
"def __in__(self, grilles):\n for grille in grilles:\n if self == grille:\n return 1\n return 0",
"def is_bothell_student():\n return _is_member('uw_affiliation_bothell-student')",
"def satisfies(self, reg):\n ### If no value, there is no need for filtering\n if self.getValues()==['']:\n return True\n affiliation = self.getValues()[0]\n return True if (affiliation == reg.getRepresentationType()[\"organizationRepresentative\"]) else False",
"def is_lucky(chance):\n return randint(0, 100) <= chance",
"async def get_guardian_email(guardian_id: UUID, angel_name: str) -> str:\n try:\n user = await User.get(id=guardian_id)\n except DoesNotExist:\n return False\n\n angels = await user.fetch_related(\"angels\")\n for angel in angels:\n if angel.name == angel_name:\n return user.email\n return False",
"def test_check_birth(self):\n herb = Fa.Herbivore(weight=60, age=20)\n herb2 = Fa.Herbivore(weight=33.24, age=2)\n carn = Fa.Carnivore(weight=60, age=20)\n print(min(1, herb.p['gamma'] * herb.fitness*(4 - 1))) # 0.58\n assert herb.check_birth(1) is False\n rd.seed(11) # rd.random() = 0.45\n assert herb.check_birth(4) is True\n assert isinstance(herb.check_birth(40), bool)\n assert herb2.check_birth(100) is False\n assert carn.check_birth(6) is True",
"def is_any_mentor_became_human(self):\n for mentor in self.mentors:\n if mentor.humanity_level >= 10:\n print(\"\\033[44m\"+mentor.first_name, mentor.last_name+\" called \"+ mentor.nickname+\" has become human \"\n \"Is ready to deliver to new Codecool facility!\", mentor.first_name, mentor.last_name,\n \"may the Force be with You!\\033[0m\")\n time.sleep(3)\n return True\n return False",
"def _user_assigned_bell(self, bell: Bell) -> bool:\n return not self._bot_assigned_bell(bell)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This returns a single entry corresponding to the Directory Entity referred to by FolderEntityData. The returned string is given below (between Start and End) Start | def getFolderEntry(FolderEntityData):
if FolderEntityData.Type not in ['IntermediateDir', 'ExperimentDir']:
errprint('\nThe given EntityData does not represent the data of a directory')
raise ValueError
OutputLines = []
OutputLines.append("FolderID : {UID}".format(UID=FolderEntityData.ID))
OutputLines.append("ParentFolderID : {UID}".format(UID=FolderEntityData.ParentID))
OutputLines.append("FolderType : {Type}".format(Type=FolderEntityData.Type))
OutputLines.append("FolderTitle : {Title}".format(Title=FolderEntityData.Title))
OutputLines.append("FolderDescription: |-2")
OutputLines += [" "+Line for Line in FolderEntityData.Description.splitlines()]
OutputLines.append("")
return "\n".join(OutputLines) | [
"def getFolderItemName(self) -> unicode:\n ...",
"def folder_key(title,folder_name=DEFAULT_FOLDER_NAME):\n #parameter order is reversed because of kwargs necessities :(\n #i dont use this atm\n return ndb.Key('Folder', folder_name,'File',title)",
"def folder_key(self):\n return self._folder_key",
"def directory_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"directory_id\")",
"def _get_ds_name_folder_path(self, backing):\n vmdk_ds_file_path = self.volumeops.get_path_name(backing)\n (datastore_name,\n folder_path, _) = volumeops.split_datastore_path(vmdk_ds_file_path)\n return (datastore_name, folder_path)",
"def fl_get_folder_name(ptr_flobject):\n _fl_get_folder_name = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_folder_name\",\n xfdata.STRING, [cty.POINTER(xfdata.FL_OBJECT)],\n \"\"\"const char * fl_get_folder_name(FL_OBJECT * ob)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.keep_elem_refs(ptr_flobject)\n retval = _fl_get_folder_name(ptr_flobject)\n if isinstance(retval, bytes):\n return retval.decode('utf-8')\n else: # str\n return retval",
"def path(self):\n return self._dir_entry.path",
"def ProjectFolderId(self):\n return self.raw_project_folder_data.get(\"ProjectFolderId\")",
"def _getdescription(self):\r\n result = self._session.execute(\"query -t folder -n %s -i %s -u -f \\\"%%description\\\"\" % (self.name, self.instance))\r\n return result.output.strip()",
"def get_relative_name(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetRelativeName', self.handle)",
"def _get_root_metadata(self):\n r, rx_dict = self._do_request(\n 'get',\n http_server_utils.join_url_components(\n [self._api_drive_endpoint_prefix, 'listfolder']),\n params={'folderid': 0})\n return rx_dict['metadata']",
"def _get_folder(self):\n # type: () -> str\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n response = self.connection.api_call(\n \"GET\", [\"v1\", \"resources\", self.id, \"folderpath\"], headers=headers\n )\n\n return response.json().get(\"path\")",
"def folder_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"folder_id\")",
"def _summarize_home_folder_into_string(self, folder_id):\n subfolders, files = self._get_home_folder_contents()\n \n str_list = [\n # List all subfolders\n *[str(index) + \". \" + item['title'] for index, item in enumerate(subfolders, 1)],\n \n # An empty space to separate folder from files\n \"\",\n \n # List all files\n *[str(index) + \". \" + item['title'] for index, item in enumerate(files, len(subfolders) + 1)],\n ]\n \n str_reply = \"\\n\".join(str_list)\n return str_reply",
"def Directory(self) -> str:",
"def get_dcim_folder(device_pidl, parent):\r\n device_name = parent.GetDisplayNameOf(device_pidl, shellcon.SHGDN_NORMAL)\r\n name = None\r\n pidl = None\r\n\r\n folder = parent.BindToObject(device_pidl, None, shell.IID_IShellFolder)\r\n try:\r\n top_dir_name = \"\"\r\n for pidl in folder.EnumObjects(0, shellcon.SHCONTF_FOLDERS):\r\n top_dir_name = folder.GetDisplayNameOf(pidl, shellcon.SHGDN_NORMAL)\r\n break # Only want to see the first folder.\r\n if top_dir_name != \"Internal Storage\":\r\n return None, None, device_name\r\n except pywintypes.com_error:\r\n return None, None, device_name # No problem, must not be an iPhone\r\n\r\n folder = folder.BindToObject(pidl, None, shell.IID_IShellFolder)\r\n for pidl in folder.EnumObjects(0, shellcon.SHCONTF_FOLDERS):\r\n name = folder.GetDisplayNameOf(pidl, shellcon.SHGDN_NORMAL)\r\n break # Only want to see the first folder.\r\n if name != \"DCIM\":\r\n logger.warning(\"%s's '%s' has '%s', not a 'DCIM' dir.\" %\r\n (device_name, top_dir_name, name))\r\n return None, None, device_name\r\n\r\n return pidl, folder, device_name",
"def _lookup_used_entity_id(self, file_details):\n # Since this uses the response from POST to /files/ this will include the ancestors and not be\n # effected by exclude_response_fields that were used when listing the project\n name_parts = [ancestor['name'] for ancestor in file_details['ancestors']\n if ancestor['kind'] == KindType.folder_str]\n name_parts.append(file_details['name'])\n remote_path = RemotePath.add_leading_slash(os.sep.join(name_parts))\n return self.activity.remote_path_to_file_version_id[remote_path]",
"def get_folder_short_name_for_location(self, location):\n _method_name = 'get_folder_short_name_for_location'\n _logger.entering(location.get_folder_path(), class_name=_class_name, method_name=_method_name)\n folder_dict = self.__get_dictionary_for_location(location, False)\n result = ''\n if SHORT_NAME in folder_dict:\n result = folder_dict[SHORT_NAME]\n _logger.exiting(class_name=_class_name, method_name=_method_name, result=result)\n return result",
"def get_foldername(self):\n return self.logfolder"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This returns a single entry corresponding to the Experiment Entity referred to by ExpEntityData. The returned string is given below (between Start and End) Start | def getExperimentEntry(ExpEntityData):
# Validate that ExpEntityData actually corresponds to an Experiment Entity
if ExpEntityData.Type != 'Experiment':
errprint("\nThe Entity Data does not represent the data of an experiment")
raise ValueError
OutputLines = []
OutputLines.append("")
OutputLines.append("- ID : {ID}".format(ID=ExpEntityData.ID))
OutputLines.append(" Title : {Title}".format(Title=ExpEntityData.Title))
OutputLines.append(" Description: |-2")
OutputLines += [" "+Line for Line in ExpEntityData.Description.splitlines()]
OutputLines.append("")
OutputLines.append(
"{0:#<100}".format("## End of Experiment {UID} ".format(UID=ExpEntityData.ID)))
return "\n".join(OutputLines) | [
"def entity_description(self, eid):\n entities = self._load_entities()\n return entities[eid][\"description\"]",
"def getEntity(self):\n\n fid = file(self.filename)\n entityre = re.compile(\"entity (\\w+) is\", re.IGNORECASE)\n\n matches = entityre.search(fid.read())\n self.entityname = matches.groups()[0]\n return self.entityname",
"def print_entity(entity):\n print 'entity.original_text:', entity.original_text\n print 'entity.display_text:', entity.display_text\n print 'entity.display_html:', entity.display_html\n print 'entity.start_index:', entity.start_index\n print 'entity.end_index:', entity.end_index",
"def entity_tostring(entity):\n\n metadata = \", \".join(['\"%s\": \"%s\"' % (key, value) for\n key, value in entity.metadata.items()])\n\n mentions = \", \".join(['\"%s\"' % mention for mention in entity.mentions])\n\n return ('{name: \"%s\",'\n ' type: \"%s\",'\n ' metadata: {%s},'\n ' salience: %s,'\n ' mentions: [%s]}') % (\n entity.name,\n entity.type,\n metadata,\n entity.salience,\n mentions)",
"def _getEntityEndKey(entityId):\n return \"%s\\x1E\" % entityId",
"def _getEntityStartKey(entityId):\n return \"%s\\x1D\" % entityId",
"def extent(obj):\n return obj.get('startOffset', -1), obj.get('endOffset', -1)",
"def describe_entry(e, fields):\n from operator import getitem\n from six.moves import reduce\n return \" \".join([reduce(getitem, f.split('.'), e)\n for f in fields])",
"def main_entity_of_page(self) -> str:\n return self._main_entity_of_page",
"def entity_name(self):\n return self.entity.name",
"def details(self):\n return self.request(\"/details.json\")[\"Response\"][\"Data\"][\"Entity\"]",
"def get_data(self):\n # Check if we have downloaded experiment list yet\n if not self.experiment_container:\n self.populate_from_db()\n # Retrieve experiment results from database\n measurement_container = list(map(list, zip(*self.experiment_container)))[1]\n #Return all experiment IDs and experimental data associated with this entity\n #replace db.Experiment.ID with db.Experiment.type to index by experiment names not experiment IDs\n stm = cfg.session.query(db.Experiment.type,db.Measurement.data)\\\n .select_from(db.Measurement) \\\n .join(db.Object).join(db.Entity).join(db.Experiment)\\\n .filter(db.Measurement.ID.in_(measurement_container))\n return stm.all()",
"def test_entity(self):\n self.request.log(\"Hello World\", entities=(Entity(1337)(12, \"Demo\"),))\n self.request.end()\n entry = self.get_entry()\n assert 'entities' in entry\n assert len(entry['entities']) == 1\n assert entry['entities'][0] == dict(entity=1337, id=12, name=\"Demo\")",
"def __str__(self) -> str:\n st = \"<Entity>: \\n{\\n\"\n for k, v in self._keys.items():\n if not isinstance(v, list):\n st += f\"\\t {k} = \\\"{v}\\\"\\n\"\n if self._fixup is not None:\n for k, v in self.fixup.items():\n st += f\"\\t ${k} = \\\"{v}\\\"\\n\"\n\n for out in self.outputs:\n st += f'\\t{out!s}\\n'\n st += \"}\\n\"\n return st",
"def get_description(self):\n return \"It is an Entity.\"",
"def entity_name(self) -> str:\n return self._entity_name",
"def exp_metadata(self) -> LabExperiment:\n\n return self._exp_metadata",
"def entity_name(self):\n return self.__entity_name",
"def commandGetEntitySummary(entityClient: EntityClient, entity: str):\n demisto.debug('commandGetEntitySummary has been called.')\n\n result: Dict[str, Any] = entityClient.getEntitySummary(entity)\n\n prefix = 'Insight.Entity.Summary'\n key = 'summary'\n\n if not result:\n raise Exception(f'We receive an invalid response from the server ({result})')\n\n if key not in result:\n raise Exception(f'We receive an invalid response from the server (The response does not contains the key: {key})')\n\n if not result.get(key):\n return \"We could not find any result for Get Entity Summary.\"\n\n return CommandResults(\n outputs_prefix=prefix,\n outputs_key_field=key,\n outputs=result.get(key)\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get all the employees out of the database | def get_employees(self):
from Employee import Employee
cursor = self.dbconnect.get_cursor()
cursor.execute('select * from employee')
employees = list()
for row in cursor:
employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
employees.append(employee)
return employees | [
"def get_employees():\n employees = list()\n try:\n connection = DBConnection.getConnection()\n cursor = connection.cursor()\n cursor.execute(\"select * from employee;\")\n rows = cursor.fetchall()\n connection.commit()\n for data in rows:\n emp_id = data[0]\n name = data[1]\n designation_code = data[2]\n dob = data[3]\n day = dob.strftime(\"%d\")\n month = dob.strftime(\"%m\")\n year = dob.strftime(\"%Y\")\n salary = data[4]\n gender = data[5]\n indian = data[6]\n pan_no = data[7]\n aadhar = data[8]\n employee = Employee(emp_id, name, designation_code, day,\n month, year, salary, gender, indian, pan_no, aadhar)\n employees.append(employee)\n except Error as error:\n raise DataLayerError(message=error.msg)\n finally:\n try:\n if cursor.is_open():\n cursor.close()\n if connection.is_connected():\n connection.close()\n except:\n pass\n return employees",
"def get_employees(self):\n self.employee_list = []\n try:\n employees = self.db['employees'].all()\n # loop through what we get back from DB\n for emp in self.db['employees']:\n self.employee_list.append(\n employee.Employee(int(emp['id']), str(emp['name']), str(emp['password']), int(emp['role'])))\n except:\n print(\"error\")\n self.statusbar.showMessage(\"Error loading employee data\", 4000)",
"def get_employees(self):\n for employees in self.employees:\n return employees",
"def get_all_employee():\r\n conn = sqlite3.connect('test2.db')\r\n try:\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT * FROM EMPLOYEES\")\r\n rows = cur.fetchall()\r\n employee_list = []\r\n for row in rows:\r\n employee_list.append(row)\r\n return employee_list\r\n except (sqlite3.ProgrammingError, sqlite3.Error) as e:\r\n print(\"TABLE EMPLOYEES not found\")\r\n except sqlite3.Error as er:\r\n print(\"Database error: \", er.message)\r\n except Exception as e:\r\n print(e)",
"def getEmployees(self):\n return self.employees",
"def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200",
"def get_employees(self):\n return self.employees",
"def employees(self):\n page = app.config['EMPLOYEES_PER_PAGE']\n emps = DeptEmp.query.filter_by(dept_no=self.dept_no)\n return [e.emp_no for e in current_records(emps, g.date)]",
"def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")",
"def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False",
"def fourth_query():\n employees = []\n for name, surname, department_name, l_city, l_state_province in session.query(Employees.first_name,\n Employees.last_name,\n Departments.depart_name,\n Locations.city,\n Locations.state_province).filter(\n Employees.department_id == Departments.department_id, Departments.location_id == Locations.location_id).all():\n employees.append(f'{name}, {surname}, {department_name}, {l_city}, {l_state_province}')\n return employees",
"def process_all_employees(self):\n return jsonify(encode_list(self.facade_service.get_all_employees()))",
"def get_employees(cls, strategy=lazyload):\n cls._check_strategy(strategy)\n\n return db.session.query(Employee).options(\n strategy(Employee.department)\n ).all()",
"def test_get_all_employees(self):\n with self.app.app_context():\n self.assertNotEqual(es.get_all(), [])",
"def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list",
"def seventh_query():\n employees = []\n for name, surname, salary in session.query(Employees.first_name, Employees.last_name, Employees.salary).filter(\n Employees.department_id == Departments.department_id, Departments.location_id == Locations.location_id).\\\n filter(Locations.city == 'London').all():\n employees.append(f'{name}, {surname}, {salary}')\n\n # Вариант №2\n # for name, surname, salary in session.query(Employees.first_name, Employees.last_name, Employees.salary).\\\n # join(Departments, Locations).filter(Locations.city == 'London').all():\n # employees.append(f'{name}, {surname}, {salary}')\n return employees",
"def get_employees(self):\n if self.__employees == []:\n with open(\"./data/employees.csv\", \"r\") as employee_file:\n for line in employee_file.readlines():\n username, password = line.split(\",\")\n #new_employee = Employee(username, password)\n self.__employees.append(username) \n \n return self.__employees",
"def get_employees_in_department(department_name: str) -> list:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT Employee.empid, Employee.name\n FROM Employee JOIN EmployeeDepartments USING(empid)\n WHERE EmployeeDepartments.department = %s\"\"\"\n cur.execute(sql, (department_name,))\n\n # Attempt to fetch all rows\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n employees = []\n for row in result:\n employees.append(\n [row[0], row[1]]\n )\n cur.close()\n conn.close()\n return employees\n except Exception as e:\n print(\"ooo\")\n print(e)\n # If nothing was returned, return empty list\n cur.close()\n conn.close()\n return []\n\n # TODO Dummy Data - Change to be useful!\n # Return the employees in the department.\n # Each \"row\" has: [ empid, name ]\n\n # employees = [\n # [15905, 'Rea Fibbings'],\n # [9438, 'Julia Norville'],\n # [36020, 'Adora Lansdowne'],\n # [98809, 'Nathanial Farfoot'],\n # [58407, 'Lynne Smorthit'],\n # ]\n #\n # return employees",
"def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
gets all the admins from the database | def get_admins(self):
from Employee import Employee
admins = list()
cursorRoles = self.dbconnect.get_cursor()
cursorRoles.execute('select * from employeeRoles where role=\'admin\'')
for row in cursorRoles:
admins.append(self.get_employee(row[0]))
return admins | [
"def get_all_administrators():\n return User.objects.filter(groups__name=\"administrators\")",
"def get_admins():\n users = get_users()\n admins = []\n for user in users:\n if user[\"approval_level\"] == \"admin\":\n admins.append(user)\n\n return admins",
"def get_admins(name):\n obj = DataService.objects(name=name).first()\n if obj is None:\n return []\n return list(obj.admins)",
"def get_admins(self):\n return self.admins_group.user_set.all()",
"def list_admin() -> None:\n admin_users = list(User.objects(admin=True).scalar('email'))\n if admin_users:\n echo('Allowed admins are')\n for email in admin_users:\n echo('- %s' % email)\n else:\n echo('No admins found')\n\n users = list(User.objects(admin=False).scalar('email'))\n if users:\n echo('Rest of users are:')\n for email in users:\n echo('- %s' % email)",
"async def _ad_list(self, ctx):\n admin_list = self.database.get_admins(ctx.guild.id)\n if len(admin_list) > 0:\n out = \"```\"\n for admin in admin_list:\n admin_name = self.bot.get_user(admin.user_id)\n admin_name = str(admin_name) if admin_name is not None else admin.user_id\n out += f\"{admin_name}\\n\"\n out += \"```\"\n await ctx.send(out)\n else:\n await ctx.send(\"This guild currently has no administrators.\")",
"def get_admins(self):\n admins = User.objects.filter(Q(groups__name=self.admin_group_name()) | Q(is_superuser=True)).distinct()\n return admins",
"async def _ad_all(self, ctx):\n all_admins = self.database.get_all_admins()\n consumed = []\n out = \"```\"\n for admin in all_admins:\n if admin.guild_id not in consumed:\n out += f\"Guild: {self.bot.get_guild(admin.guild_id)}\\n\"\n consumed.append(admin.guild_id)\n admin = self.bot.get_user(admin.user_id)\n admin = str(admin) if admin is not None else admin.user_id\n out += f\" {admin}\\n\"\n if out != \"```\":\n out += \"```\"\n await ctx.send(out)\n else:\n await ctx.send(\"No admins currently\")",
"def get_all_admins(cls):\r\n try:\r\n # Create a list of all admins in db excluding the super admin.\r\n all_users = list(cls.admin_collection.find({\"username\":{\"$nin\":[parser.get('API', 'ADMIN_NAME')]}},\r\n { \"_id\":0, \"password\":0}))\r\n return all_users\r\n except errors.PyMongoError as e:\r\n ## TODO: Logging\r\n return False",
"def getAllAdmins():\r\n if len(request.args) == 0:\r\n users = models.User.query.all()\r\n else:\r\n users = models.User.query.filter(\r\n models.User.Name.like((\"%\" + request.args[\"name\"] + \"%\")) if request.args[\"name\"] is not None else \"\").all()\r\n data = []\r\n for user in users:\r\n data.append({'id': user.id, 'mobile': user.mobile, 'state': user.state, 'isSuperAdmin': user.isSuperAdmin,\r\n 'name': user.Name})\r\n return jsonify(code=200, msg=\"获取管理员列表成功\", data=data)",
"def getDevelAdmins(self):\n pass",
"def get_list_of_admins() -> List[User]:\n return DBDiscussionSession.query(User).filter(User.group == Group.ADMIN).all()",
"def return_admin_list(request):\n del request\n return return_user_list(Administrador)",
"def admins(self):\n return User.objects.filter_by_role(role=Roles.GROUP_ADMIN, roles__group=self)",
"def getDevelAdmins(self):\n\t\tpass",
"def get_admin_users(self):\n for user in self.get_all_users():\n if user.is_admin:\n yield user",
"def get_admins() -> Tuple[int, ...]:\n db = get_database_connection()\n admins = [int(admin_id) for admin_id in db.lrange('avito:admin_list', 0, -1)]\n return tuple(admins)",
"def __update_admin_cache(self):\n\n header = connect(self.__path)\n curs = header.cursor()\n curs.execute(\"SELECT * FROM admins WHERE id IS NOT NULL\")\n data = curs.fetchall()\n newlist = []\n for item in data:\n newlist.append(item[0])\n self.__admins = newlist",
"def get_local_admins():\n admin_list = get_users_config()\n response = []\n\n if \"users\" not in admin_list[\"result\"]:\n return response\n\n if isinstance(admin_list[\"result\"][\"users\"][\"entry\"], list):\n for entry in admin_list[\"result\"][\"users\"][\"entry\"]:\n response.append(entry[\"name\"])\n else:\n response.append(admin_list[\"result\"][\"users\"][\"entry\"][\"name\"])\n\n return response"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
gets a single employee out of the database by id | def get_employee(self, id):
from Employee import Employee
cursor = self.dbconnect.get_cursor()
cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))
row = cursor.fetchone()
return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]) | [
"def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)",
"def get(id_: int):\n logger.debug('Retrieving employee by id %i.', id_)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.id == id_\n ).scalar()\n if not employee:\n raise Exception(f\"Can't get employee with id {id_}\", )\n except Exception as exception:\n logger.error('An error occurred while retrieving employee with id %i.'\n ' Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by id %i.', id_)\n return employee",
"def get_employee_by_id(employee_id):\n where = Employee.id == employee_id\n query = get_employee_query(where)\n return query.one()",
"def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise",
"def get(self, id):\n employee = self.query.get(id)\n return employee if employee and employee.is_deleted == False else None",
"def get_employee_by_id(emp_id):\n if emp_id == None:\n raise DataLayerError(message=\"Employee ID Required\")\n if not isinstance(emp_id, int):\n raise DataLayerError(\n f\"Found type {type(emp_id)}, required type {type(0)}\")\n if emp_id <= 0:\n raise DataLayerError(f\"Invalid employee ID : {emp_id}\")\n try:\n connection = DBConnection.getConnection()\n cursor = connection.cursor()\n cursor.execute(\"select * from employee where emp_id=%s\", (emp_id,))\n row = cursor.fetchone()\n connection.commit()\n if row == None:\n raise DataLayerError(\n message=f\"Employee ID : {emp_id} does not exists\")\n emp_id = row[0]\n name = row[1]\n designation_code = row[2]\n dob = row[3]\n day = dob.strftime(\"%d\")\n month = dob.strftime(\"%m\")\n year = dob.strftime(\"%Y\")\n salary = row[4]\n gender = row[5]\n indian = row[6]\n pan_no = row[7]\n aadhar = row[8]\n employee = Employee(emp_id, name, designation_code, day,\n month, year, salary, gender, indian, pan_no, aadhar)\n except Error as error:\n raise DataLayerError(message=error.msg)\n finally:\n try:\n if cursor.is_open():\n cursor.close()\n if connection.is_connected():\n connection.close()\n except:\n pass\n return employee",
"def find_employee_by_id(self,id):\n self.employee_id()\n if id in self.emp_id:\n print(self.emp_id[id])\n return self.emp_id[id]\n else:\n print(\"Employee not found\")",
"def find_employee(id):\n for employee in emp_data:\n if employee.em_no == id:\n return employee\n return None",
"def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)",
"def _get_employee(self):\n emp = self.env['hr.employee'].search([('attendance_code', '=', self.code)]) or False\n if emp:\n self.employee = emp[0].id",
"def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))",
"def get_employee_by_name(self, name):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE name=%s', (name,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])",
"def get_employee(self, name):\n name = name.upper()\n if name in EMPLOYEE_MAP:\n name = EMPLOYEE_MAP[name]\n try:\n int(name)\n emps = Employee.objects.filter(id=name)\n except ValueError:\n if name == 'NN':\n emps = Employee.objects.filter(user__first_name='Nieznany')\n elif Employee.objects.filter(user__username__iexact=name).exists():\n emps = Employee.objects.filter(user__username__iexact=name)\n elif len(name) == 3:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:3],\n status=0)\n else:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:],\n status=0)\n if not emps:\n emps = Employee.objects.filter(user__username__istartswith=name)\n if len(emps) == 1:\n return emps[0]\n elif len(emps) > 1:\n self.stdout.write(self.style.ERROR('Multiple employee matches for {}. Choices are:'\n .format(name)))\n for e in emps:\n self.stdout.write(self.style.ERROR(' -{}'.format(e.user.get_full_name())))\n else:\n raise CommandError('Employee {} does not exists! Fix your input file.'.format(name))\n\n return None",
"def obtener_empresa(id):\r\n try:\r\n return Empresa.objects.get(id=id)\r\n except Exception,e:\r\n return None",
"def get_employeeOnName(self, name):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE name=%s ', (name,))\n if (cursor.rowcount != 0):\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n else:\n return None",
"def employers_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=EMPLOYER_TYPE_URI,\n rdf_type_name=EMPLOYER_TYPE_NAME, \n kls=Employer)",
"def lookup(cls, id: int):\n record = query_db(\n \"select id, amount, description, user_id from expenses where id = ?\",\n [id],\n one=True,\n )\n if record is None:\n raise NotFound()\n return cls(**record)",
"def test_api_can_get_employee_by_id(self):\n res = self.client().get(service_url_emp+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))",
"def get_examen(self, id_examen):\n\n self.logger.info(\"\\t[+] get_examen [+]\")\n self.logger.info(f\"\\t[+] id_examen {id_examen} [+]\")\n try:\n return self.examens.select().where(self.examens.columns.id_examen == id_examen).execute()\n except Exception as e:\n self.logger.critical(\"\\t[-] Exception occured [-]\")\n self.logger.critical(\"\\t\" + str(e))\n self.logger.critical(\"\\t[-] Exception occured [-]\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
gets a single employee out of the database by name | def get_employeeOnName(self, name):
from Employee import Employee
cursor = self.dbconnect.get_cursor()
cursor.execute('SELECT * FROM employee WHERE name=%s ', (name,))
if (cursor.rowcount != 0):
row = cursor.fetchone()
return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
else:
return None | [
"def get_employee_by_name(self, name):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE name=%s', (name,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])",
"def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])",
"def get_employee(self, name):\n name = name.upper()\n if name in EMPLOYEE_MAP:\n name = EMPLOYEE_MAP[name]\n try:\n int(name)\n emps = Employee.objects.filter(id=name)\n except ValueError:\n if name == 'NN':\n emps = Employee.objects.filter(user__first_name='Nieznany')\n elif Employee.objects.filter(user__username__iexact=name).exists():\n emps = Employee.objects.filter(user__username__iexact=name)\n elif len(name) == 3:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:3],\n status=0)\n else:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:],\n status=0)\n if not emps:\n emps = Employee.objects.filter(user__username__istartswith=name)\n if len(emps) == 1:\n return emps[0]\n elif len(emps) > 1:\n self.stdout.write(self.style.ERROR('Multiple employee matches for {}. Choices are:'\n .format(name)))\n for e in emps:\n self.stdout.write(self.style.ERROR(' -{}'.format(e.user.get_full_name())))\n else:\n raise CommandError('Employee {} does not exists! Fix your input file.'.format(name))\n\n return None",
"def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))",
"def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise",
"def get_employee_by_id(emp_id):\n if emp_id == None:\n raise DataLayerError(message=\"Employee ID Required\")\n if not isinstance(emp_id, int):\n raise DataLayerError(\n f\"Found type {type(emp_id)}, required type {type(0)}\")\n if emp_id <= 0:\n raise DataLayerError(f\"Invalid employee ID : {emp_id}\")\n try:\n connection = DBConnection.getConnection()\n cursor = connection.cursor()\n cursor.execute(\"select * from employee where emp_id=%s\", (emp_id,))\n row = cursor.fetchone()\n connection.commit()\n if row == None:\n raise DataLayerError(\n message=f\"Employee ID : {emp_id} does not exists\")\n emp_id = row[0]\n name = row[1]\n designation_code = row[2]\n dob = row[3]\n day = dob.strftime(\"%d\")\n month = dob.strftime(\"%m\")\n year = dob.strftime(\"%Y\")\n salary = row[4]\n gender = row[5]\n indian = row[6]\n pan_no = row[7]\n aadhar = row[8]\n employee = Employee(emp_id, name, designation_code, day,\n month, year, salary, gender, indian, pan_no, aadhar)\n except Error as error:\n raise DataLayerError(message=error.msg)\n finally:\n try:\n if cursor.is_open():\n cursor.close()\n if connection.is_connected():\n connection.close()\n except:\n pass\n return employee",
"def get(id_: int):\n logger.debug('Retrieving employee by id %i.', id_)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.id == id_\n ).scalar()\n if not employee:\n raise Exception(f\"Can't get employee with id {id_}\", )\n except Exception as exception:\n logger.error('An error occurred while retrieving employee with id %i.'\n ' Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by id %i.', id_)\n return employee",
"def get_employee_by_id(employee_id):\n where = Employee.id == employee_id\n query = get_employee_query(where)\n return query.one()",
"def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)",
"def _get_employee(self):\n emp = self.env['hr.employee'].search([('attendance_code', '=', self.code)]) or False\n if emp:\n self.employee = emp[0].id",
"def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)",
"def check_employee(self, employee_name):\n for employee in self.employees:\n if employee.name == employee_name:\n return employee",
"def find_employee(id):\n for employee in emp_data:\n if employee.em_no == id:\n return employee\n return None",
"def get_employees():\n employees = list()\n try:\n connection = DBConnection.getConnection()\n cursor = connection.cursor()\n cursor.execute(\"select * from employee;\")\n rows = cursor.fetchall()\n connection.commit()\n for data in rows:\n emp_id = data[0]\n name = data[1]\n designation_code = data[2]\n dob = data[3]\n day = dob.strftime(\"%d\")\n month = dob.strftime(\"%m\")\n year = dob.strftime(\"%Y\")\n salary = data[4]\n gender = data[5]\n indian = data[6]\n pan_no = data[7]\n aadhar = data[8]\n employee = Employee(emp_id, name, designation_code, day,\n month, year, salary, gender, indian, pan_no, aadhar)\n employees.append(employee)\n except Error as error:\n raise DataLayerError(message=error.msg)\n finally:\n try:\n if cursor.is_open():\n cursor.close()\n if connection.is_connected():\n connection.close()\n except:\n pass\n return employees",
"def get_name(self, name):\n\n self.curr.execute(''' SELECT * FROM parties WHERE name=%s''', (name,))\n party = self.curr.fetchone()\n self.conn.commit()\n self.curr.close()\n return party",
"def get(self, name, user):\n connection = self.connect()\n cursor = connection.cursor()\n cursor.execute(self.sql[\"get\"], {\"name\": name, \"user\": user})\n result = cursor.fetchone()\n if result is not None:\n return result[0].split()\n else:\n raise DoesNotExistException(\n \"Could not find an applicable saved roll with that name.\"\n )",
"def get(self, id):\n employee = self.query.get(id)\n return employee if employee and employee.is_deleted == False else None",
"def lookup_by_employee(self):\n self.clear_console()\n print(self.format_header('Lookup by Employee'))\n\n get_employee_names = Entry.select().distinct(Entry.employee_name).execute()\n employee_names = set()\n [employee_names.add(employee.employee_name.title()) for employee in get_employee_names]\n # allow the user to choose from a name\n [print(name) for name in employee_names]\n while True:\n chosen_name = input('Choose an employee: ').strip()\n if chosen_name == '':\n print('Please choose from the list of available names, or type \"back\" to return to lookup menu')\n continue\n # get the number of matches...\n name_matches = ConsoleUI.get_matches(chosen_name, employee_names)\n if len(name_matches) > 1:\n # clarify...\n while True:\n print(self.clear_console())\n print('Multiple matches:')\n [print(name) for name in name_matches]\n specific_name = input('Choose an exact name, or enter \"all\" to get all matches: ').lower().strip()\n if specific_name == 'all':\n # return all results\n entries = Entry.select().order_by(Entry.created_timestamp.desc())\n entries = entries.where(fn.Lower(Entry.employee_name).contains(chosen_name.lower()))\n self.display_one_at_a_time(entries)\n return True\n elif specific_name.title() in name_matches:\n # return the specific name results\n entries = Entry.select().order_by(Entry.created_timestamp.desc())\n entries = entries.where(fn.Lower(Entry.employee_name) == specific_name.lower())\n self.display_one_at_a_time(entries)\n return True\n elif len(name_matches) == 1:\n # run the query\n entries = Entry.select().order_by(Entry.created_timestamp.desc())\n entries = entries.where(fn.Lower(Entry.employee_name) == chosen_name.lower())\n self.display_one_at_a_time(entries)\n return True\n elif chosen_name == 'Back':\n break\n else:\n # no matches...\n print('Please choose from the list of available names, or type \"back\" to return to lookup menu')",
"def test_get_existing_employee(self):\n with self.app.app_context():\n self.assertTrue(es.get_by_name('Mary'))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
adds an employee to the database | def add_employee(self, empl):
cursor = self.dbconnect.get_cursor()
try:
cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)',
(empl.name, empl.email, empl.office, empl.research_group, empl.title, empl.internOrExtern,
empl.active, empl.promotor))
cursor.execute('SELECT LASTVAL()')
eid = cursor.fetchone()[0]
empl.id = eid
# get id and return updated object
self.dbconnect.commit()
except(Exception, self.dbconnect.get_error()) as error:
self.dbconnect.rollback()
raise Exception('\nUnable to save Employee!\n(%s)' % (error)) | [
"def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise",
"def add_employee(employee):\n if employee == None:\n raise DataLayerError(message=\"Employee Required\")\n if not isinstance(employee, Employee):\n raise DataLayerError(\n f\"Found type {type(employee)}, required type <class 'Employee'>\")\n if employee.has_exceptions:\n raise DataLayerError(exceptions=employee.exceptions)\n if employee.emp_id != 0:\n raise DataLayerError(\n \"Employee ID must be assigned zero, as it is auto generated.\")\n try:\n connection = DBConnection.getConnection()\n cursor = connection.cursor()\n cursor.execute(\n \"select emp_id from employee where name=%s\", (employee.name,))\n rows = cursor.fetchall()\n cursor.execute(\"insert into employee (name,designation_code,DOB,salary,gender,is_indian,pan_no,aadhar_no) values (%s,%s,%s,%s,%s,%s,%s,%s)\", (\n employee.name, employee.designation_code, employee.dob, employee.salary, employee.gender.capitalize(), employee.indian, employee.pan_no, employee.aadhar))\n if len(rows) > 0:\n print(\n f\"There are {len(rows) + 1} employees with same name now!\")\n employee.emp_id = cursor.lastrowid\n connection.commit()\n except Error as err:\n raise DataLayerError(message=err.msg)\n finally:\n try:\n if cursor.is_open():\n cursor.close()\n if connection.is_connected():\n connection.close()\n except:\n pass",
"def test_add_employee(self):\n with self.app.app_context():\n id_ = ds.get_by_name('HR').id\n es.add(name='Mary', birthday=date(2000, 9, 22), department=id_,\n working_since=date(2020, 1, 13), salary=30000.0)\n self.assertEqual(len(es.get_all()), 2)",
"def add_employee(self, employee):\n self.employees.add(employee)",
"def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201",
"def add_employee(schema, employee_json):\n employee = schema.load(employee_json, session=db.session)\n db.session.add(employee)\n db.session.commit()\n return employee",
"def post(self):\n data = EmployeeRegister.parser.parse_args()\n new_employee_id = str(uuid.uuid4())\n\n while EmployeeModel.find_by_id(new_employee_id):\n # if this id is already in use\n new_employee_id = str(uuid.uuid4())\n\n employee = EmployeeModel(**data, employee_id=new_employee_id)\n employee.save_to_db()\n\n return {\"message\": \"Employee successfully added to the system\"}, 201 # 201 - Created",
"def add_employee(self, emp):\n if emp not in self.employees: \n self.employees.append(emp)",
"def add_employee(self, name, rank): \n raise Exception('TODO IMPLEMENT ME !')",
"def post(self, request):\n form = EmployeeForm(request.POST)\n if form.is_valid():\n new_employee = form.save()\n return redirect('employee:employee_detail', id=new_employee.id)\n else:\n return render(request, 'employee/add-employee.html', {'form': form, 'func': 'Add'})",
"def add_employee(self, first_name, last_name):\n self.switch_main_menu(\"PIM\")\n self.click_menu(\"Add Employee\")\n self.pim = AddEmployee(self.driver)\n self.pim.add_user_employee(first_name, last_name)",
"def create_employee(self):\n try:\n name = input(\"Enter name: \")\n if not name.isalpha():\n print(\"Invalid data format. Name should contain only alphabets. \")\n return False\n email = input(\"Enter email: \")\n if not InputValidations.validate_email(email):\n return False\n employee = EmployeeModel(name=name, email=email)\n self.admin_repository.create_employee(employee)\n print(\"Employee created successfully!\")\n return True\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False",
"def add_employee(self, employee):\n with open(\"./data/employees.csv\", \"a+\") as employees_file:\n username = employee.get_username()\n password = employee.get_password()\n employees_file.write(\"{},{}\\n\".format(username, password))",
"def create_employee(self,personal_identity):\r\n new_emp = Employee(*personal_identity)\r\n registration_str = new_emp.get_registration_str()\r\n\r\n return_value = self.save_object_to_DB(\"employee\",registration_str)\r\n return return_value",
"def register_employee(message):\n employee_name = message.text\n employee_id = message.from_user.id\n employee = Employee(employee_id, employee_name)\n\n employees.register_employee(employee)\n bot.send_message(message.chat.id, \"Добро пожаловать,\" + str(employee_name) + \". Теперь вы можете выбрать действие\" +\n \"\\n\\nЕсли действия не отобразились, нажмите на \" +\n \"иконку клавиатуры возле поля ввода\",\n reply_markup=start_keyboard)\n bot.register_next_step_handler(message, action_choose)",
"def post(self, request, *args, **kwargs):\n response = super().post(request, *args, **kwargs)\n company = self.object\n company.create_employee_data()\n return response",
"def add_offer(self):\r\n self.add_company_row()\r\n job_title = self.form.job_title.data\r\n pay_offer = self.form.pay_offer.data\r\n contact_email = self.form.recruiter_email.data\r\n job_description = self.form.job_description.data\r\n timestamp = datetime.now()\r\n offer = Offer(\r\n job_title=job_title,\r\n pay_offer=pay_offer,\r\n contact_email=contact_email,\r\n job_description=job_description,\r\n date=timestamp.strftime(\"%a %b %y\"),\r\n timestamp=timestamp,\r\n employer=self.company\r\n )\r\n db.session.add(offer)\r\n db.session.commit()",
"def add_person():\n # get values from user\n responses = accept_inputs([\"Name\"])\n # insert into db\n query_no_results(\"insert into person (name) values(?)\", [responses[\"Name\"]])\n print(\"New person created\")",
"def test_add_employee_with_deduction(self):\n\t\tloginpage = login.LoginPage(self.driver)\n\t\tself.assertTrue(loginpage.is_title_matches())\n\t\t# instantiate page object, check that we have the right title\n\t\tloginpage.adminlogin()\n\t\t# log in \n\t\thomepage = home.HomePage(self.driver)\n\t\t# we're now on a new page, so we instantiate a home page obj, check title\n\t\tself.assertTrue(homepage.is_title_matches())\n\t\thomepage.add_new_employee(\"Bobert\")\n\t\tself.assertTrue(homepage.is_employee_matches(\"2\", \"Bobert\"))\n\t\t# add \"employee 1\" and check it matches. Then we do it again with the other ones\n\t\thomepage.add_new_employee(\"James\")\n\t\tself.assertTrue(homepage.is_employee_matches(\"3\", \"James\"))\n\t\thomepage.add_new_employee(\"Kelly\")\n\t\tself.assertTrue(homepage.is_employee_matches(\"4\", \"Kelly\"))\n\t\t#tests against the different cases in the data and makes sure to look at the correct rows."
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
adds a role to an employee | def add_employeeRole(self, id, role):
cursor = self.dbconnect.get_cursor()
try:
cursor.execute('INSERT INTO employeeRoles values(%s,%s)',
(id, role))
# get id and return updated object
self.dbconnect.commit()
except(Exception, self.dbconnect.get_error()) as error:
self.dbconnect.rollback()
raise Exception('\nUnable to save EmployeeRole!\n(%s)' % (error)) | [
"def test_add_role(self):\n pass",
"def add_role(self, role):\n self.roles.add(unicode(role))",
"async def add_role(\n client,\n event,\n user: ('user', 'User to add role to'),\n role: ('role', 'The role to give'),\n):\n # Check for permissions\n if not event.user_permissions.can_manage_roles:\n abort('You need `manage roles` permission to invoke this command.')\n \n if not event.guild.cached_permissions_for(client).can_manage_roles:\n abort('I need `manage roles` permission to execute this command.')\n \n if not event.user.has_higher_role_than(role):\n abort('You must have higher role than the role you are trying to give.')\n \n if not client.has_higher_role_than(role):\n abort('I must have higher role than the role you are trying to give.')\n \n # Using `.copy_to` on forms works as well.\n return ADD_ROLE_FORM.copy_with(\n title = f'Add role {role.name} to {user.full_name}',\n custom_id = f'add_role.{user.id}.{role.id}',\n )",
"def create_role(self, name: str, description: str) -> None:\n ...",
"def add_role(self, role, parents=[]):\r\n self._roles.setdefault(role, set())\r\n self._roles[role].update(parents)",
"def test_add_role_simple(self):\n pass",
"async def add(self,user:discord.Member,*role:discord.Role):\n await self.bot.add_roles(user,*role)\n await self.bot.says_edit(\"I added a role to {}\".format(user.name))",
"async def newrole(self, ctx, *, role: discord.Role = None):\n if role is None:\n role_id = await ctx.con.fetchval('''\n SELECT role_id FROM newrole WHERE guild_id = $1\n ''', ctx.guild.id)\n if role_id is None:\n await ctx.send('A role has not been set for this guild.')\n return\n role = discord.utils.get(ctx.guild.roles, id=role_id)\n if role is None:\n async with ctx.con.transaction():\n await ctx.con.execute('''\n DELETE FROM newrole WHERE guild_id = $1\n ''', ctx.guild.id)\n await ctx.send('A role has not been set for this guild.')\n else:\n await ctx.send(f'\"{role.name}\" is the current role for new members.')\n else:\n async with ctx.con.transaction():\n await ctx.con.execute('''\n INSERT INTO newrole (guild_id, role_id) VALUES ($1, $2)\n ON CONFLICT (guild_id) DO\n UPDATE SET role_id = $2 WHERE newrole.guild_id = $1\n ''', ctx.guild.id, role.id)\n await ctx.send(f'The role \"{role.name}\" will be given to new members.')\n bot_role = discord.utils.get(ctx.me.roles, managed=True)\n if role is not None and role.position > bot_role.position:\n await ctx.send(f'Please move \"{role}\" above \"{bot_role}\" in the role list.')",
"async def addrole(ctx, member: discord.Member, *, role: discord.Role):\r\n if not role:\r\n return await ctx.send(\"That role does not exist.\")\r\n await member.add_roles(role)\r\n await ctx.send(f\"Added: {role.name} \")",
"def test_add_role_simple_post(self):\n pass",
"def add_employee(self, employee):\n self.employees.add(employee)",
"def add_role(self, name):\n role = Role.by_name(name)\n if not role:\n role = Role(name)\n db.add(role)\n if not role in self.roles:\n self.roles.append(role)",
"async def roleaddset(self, ctx):\n pass",
"def test_create_role(self):\n pass",
"def assign_role(login, role_name):\n _assign_role(login, role_name)",
"def test_edit_role_add_new_role(self):\n # Add node with controller role\n Nodes().nodes_discovered[0].checkbox.click()\n RolesPanel().controller.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n # Add cinder role\n with Nodes() as n:\n n.nodes[0].checkbox.click()\n n.edit_roles.click()\n RolesPanel().cinder.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n with Nodes() as n:\n self.assertIn(ROLE_CONTROLLER, n.nodes[0].roles.text,\n 'Controller role')\n self.assertIn(ROLE_CINDER, n.nodes[0].roles.text,\n 'Cinder role')",
"def addRole( self, role_id, title='', description='' ):\n if self._roles.get( role_id ) is not None:\n raise KeyError, 'Duplicate role: %s' % role_id\n\n self._roles[ role_id ] = { 'id' : role_id\n , 'title' : title\n , 'description' : description\n }",
"def set_role(userid, role, group, request=None):",
"async def addrole(self, ctx, invite: str, *, rolename: str):\n server = ctx.message.server\n if server.id not in self.set:\n self.server_init(server)\n role = discord.utils.get(server.roles, name=rolename)\n if role is None:\n await self.bot.say(\"That role doesn't seem to exist.\")\n return\n if server.me.top_role <= role:\n await self.bot.say(\"That role is higher or equal with my highest role - I can't assign that!\")\n return\n try:\n invites = await self.bot.invites_from(server)\n except:\n await self.bot.say(\"There is no invites on this server.\")\n return\n if invite.startswith(\"https://discord.gg/\") or invite.startswith(\"http://discord.gg/\"):\n tmp = invite.split('/')\n invite = tmp[3]\n await self.inv_update(server) # make sure we're working on updated database\n for inv in invites:\n tmp = inv.url.split('/')\n if tmp[3] == invite and inv.url in self.set[server.id][\"invites\"]:\n if \"role\" in self.set[server.id][\"invites\"][inv.url]:\n prev_role = discord.utils.get(server.roles, id=self.set[server.id][\"invites\"][inv.url][\"role\"])\n if prev_role is not None:\n e = discord.Embed(description=\"This invite already has a role assigned to it. Replace?\")\n e.add_field(name=\"Invite\", value=inv.url, inline=False)\n e.add_field(name=\"Current Role\", value=prev_role.name, inline=True)\n e.add_field(name=\"Replacing Role\", value=role.name, inline=True)\n confirm = await self.confirm_msg(ctx, e, 60)\n if confirm is False:\n return\n self.set[server.id][\"invites\"][inv.url][\"role\"] = role.id\n await self.bot.say(\"The `{}` role is now bound to the `{}` invite.\".format(role.name, invite))\n self.save()\n return\n await self.bot.say(\"That invite doesn't seem to exist.\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
gets all the roles of an employee | def get_employeeRoles(self, id):
cursor = self.dbconnect.get_cursor()
cursor.execute('select * from employeeRoles where employee=%s', (id,))
roles = list()
for row in cursor:
roles.append(row[1])
return roles | [
"def get_roles(role):",
"def get_roles(self) -> List[RoleObj]:\n ...",
"def get_roles(self, principal_id):",
"def _get_roles(self):\n return api.tuskar.OvercloudRole.list(self.request)",
"def get_roles():\n\n # Get instance of RolesOperations Class\n roles_operations = RolesOperations()\n\n # Call get_roles method\n response = roles_operations.get_roles()\n\n if response is not None:\n\n # Get the status code from response\n print('Status Code: ' + str(response.get_status_code()))\n\n if response.get_status_code() in [204, 304]:\n print('No Content' if response.get_status_code() == 204 else 'Not Modified')\n return\n\n # Get object from response\n response_object = response.get_object()\n\n if response_object is not None:\n\n # Check if expected ResponseWrapper instance is received.\n if isinstance(response_object, ResponseWrapper):\n\n # Get the list of obtained Role instances\n roles_list = response_object.get_roles()\n\n for role in roles_list:\n # Get the DisplayLabel of each Role\n print(\"Role DisplayLabel: \" + str(role.get_display_label()))\n\n # Get the forecastManager User instance of each Role\n forecast_manager = role.get_forecast_manager()\n\n # Check if forecastManager is not None\n if forecast_manager is not None:\n\n # Get the ID of the forecast Manager\n print(\"Role Forecast Manager User-ID: \" + str(forecast_manager.get_id()))\n\n # Get the name of the forecast Manager\n print(\"Role Forecast Manager User-Name: \" + str(forecast_manager.get_name()))\n\n # Get the ShareWithPeers of each Role\n print(\"Role ShareWithPeers: \" + str(role.get_share_with_peers()))\n\n # Get the Name of each Role\n print(\"Role Name: \" + role.get_name())\n\n # Get the Description of each Role\n print(\"Role Description: \" + str(role.get_description()))\n\n # Get the Id of each Role\n print(\"Role ID: \" + str(role.get_id()))\n\n # Get the reporting_to User instance of each Role\n reporting_to = role.get_reporting_to()\n\n # Check if reporting_to is not None\n if reporting_to is not None:\n # Get the ID of the reporting_to User\n print(\"Role ReportingTo User-ID: \" + str(reporting_to.get_id()))\n\n # Get the name of the reporting_to User\n print(\"Role ReportingTo User-Name: \" + str(reporting_to.get_name()))\n\n # Get the AdminUser of each Role\n print(\"Role AdminUser: \" + str(role.get_admin_user()))\n\n # Check if the request returned an exception\n elif isinstance(response_object, APIException):\n # Get the Status\n print(\"Status: \" + response_object.get_status().get_value())\n\n # Get the Code\n print(\"Code: \" + response_object.get_code().get_value())\n\n print(\"Details\")\n\n # Get the details dict\n details = response_object.get_details()\n\n for key, value in details.items():\n print(key + ' : ' + str(value))\n\n # Get the Message\n print(\"Message: \" + response_object.get_message().get_value())",
"def get_role(self, employee):\n\n company_id = self.context.get('company_id')\n company_member = CompanyMember.objects.get(user_id=employee.id,\n company_id=company_id)\n return BaseCompanyMemberSerializer(company_member, read_only=True).data",
"def roles():\n pass",
"def get_roles(self):\n path = \"%s/services/impala/roles\" % self.__base_path\n response = self.__session.get(path)\n self.__check_status_code(response.status_code)\n return response.json()",
"def get_user_roles(self):\n url = 'userroles'\n result = self.get(url)\n return result.get('userroles', result)",
"def get_roles(self, **search_args):\n return self.openbis.get_role_assignments(person=self, **search_args)",
"def get_admins(self):\n from Employee import Employee\n admins = list()\n cursorRoles = self.dbconnect.get_cursor()\n cursorRoles.execute('select * from employeeRoles where role=\\'admin\\'')\n for row in cursorRoles:\n admins.append(self.get_employee(row[0]))\n return admins",
"def role_list(self):\n method = '/auth/role/list'\n data = {}\n return self.call_rpc(method, data=data)",
"def get_roles(self, request):\n guild = Guild.objects.get(discord_id=request.data['guild'])\n role_type = request.data['role_type'] \n \n response = [] \n role_list = None\n\n # CHARACTER TYPE FIX\n if role_type in (\"mains\", \"seconds\", \"pockets\"):\n role_type = role_type[:-1].upper()\n \n CHARACTER_STATUS = (\"MAIN\", \"SECOND\", \"POCKET\")\n \n # GET ROLE LIST\n if role_type == \"tiers\":\n role_list = guild.tier_set.order_by('-weight').all()\n elif role_type == \"regions\":\n role_list = guild.region_set.all()\n elif role_type in CHARACTER_STATUS:\n role_list = guild.character_set.all()\n \n \n # GET MEMBERS\n for role in role_list:\n if role_type in CHARACTER_STATUS:\n role_members = Player.objects.filter(main__in=role.main_set.filter(status=role_type)).all()\n else:\n role_members = role.player_set.all()\n \n role_detail = {'id': role.discord_id}\n \n role_detail['players'] = [player.discord_id for player in role_members]\n response.append(role_detail)\n \n # SORT RESPONSE\n if role_type != \"tiers\":\n response.sort(key=lambda role : len(role['players']), reverse=True)\n\n return Response({'roles': response}, status=status.HTTP_200_OK)",
"def list_roles():\n\tsession = get_session()\n\tresponse = session.get(\"{url}/api/roles\".format(url=get_registry_url()))\n\treturn response.json()[\"results\"]",
"def test_list_roles(self):\n pass",
"def get_all_roles():\n\n cnx,cur = connect_to_db() #get connection with db\n cur.execute(\"SELECT DISTINCT role FROM movie_crew\")\n lst = cur.fetchall()\n cur.close()\n cnx.close()\n return lst",
"def roles(self):\n return self.m_roles",
"def list_roles(self):\n return list(self.roles)",
"def get_roles():\n\n roles = Roles.get_all()\n return RoleSerializer().jsonify(roles, many=True)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
changes the data of an employee | def change_employee(self, employee):
cursor = self.dbconnect.get_cursor()
try:
if employee.id == None:
raise Exception('no id given')
cursor.execute('select * from employee where employeeID=%s', (str(employee.id),))
if cursor.rowcount == 0:
raise Exception('no employee found with that id')
cursor.execute(
'update employee set name= %s,email= %s,office= %s,title= %s,INTernORextern= %s,active= %s,promotor= %s where employeeID=%s',
(employee.name, employee.email, employee.office, employee.title,
employee.internOrExtern, employee.active, employee.promotor, employee.id))
self.dbconnect.commit()
except:
self.dbconnect.rollback()
raise Exception('unable to change employee') | [
"def updateEmp(self, data, new_data, field):\n self.__data.updateEmp(data, new_data, field)",
"def update_employee(employee):\n if employee == None:\n raise DataLayerError(message=\"Employee Required\")\n if not isinstance(employee, Employee):\n raise DataLayerError(\n f\"Found type {type(employee)}, required type <class 'Employee'>\")\n if employee.has_exceptions:\n raise DataLayerError(exceptions=employee.exceptions)\n if employee.emp_id == 0:\n raise DataLayerError(\n \"Employee ID must not be assigned zero, it must already exist.\")\n try:\n connection = DBConnection.getConnection()\n cursor = connection.cursor()\n cursor.execute(\n \"select emp_id from employee where emp_id=%s\", (employee.emp_id,))\n rows = cursor.fetchall()\n if len(rows) != 1:\n raise DataLayerError(\n message=f\"{employee.emp_id} does not exists\")\n cursor.execute(\"update employee set name=%s, designation_code=%s, DOB=%s, salary=%s, gender=%s, is_indian=%s, pan_no=%s, aadhar_no=%s where emp_id=%s\", (employee.name,\n employee.designation_code, employee.dob, employee.salary, employee.gender.capitalize(), employee.indian, employee.pan_no, employee.aadhar, employee.emp_id))\n cursor.execute(\n \"select name from employee where emp_id=%s\", (employee.emp_id,))\n updated_data = cursor.fetchall()\n connection.commit()\n if len(updated_data) != 1 or (updated_data[0][0] != employee.name):\n raise Error(\n \"Updation failed due to unknown interrupt, please try again\")\n except Error as err:\n raise DataLayerError(message=err.msg)\n finally:\n try:\n if cursor.is_open():\n cursor.close()\n if connection.is_connected():\n connection.close()\n except:\n pass",
"def update_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET name = %s, email = %s, office = %s, extra_info = %s, picture_location = %s, '\n 'research_group = %s, title = %s, is_external = %s, is_admin = %s, is_active = %s '\n 'WHERE id = %s;',\n (obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active, obj.e_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise",
"def test_update_employee(self):\n with self.app.app_context():\n mary = es.get_by_name('Mary')\n es.update(id_=mary.id,\n name=mary.name,\n birthday=mary.birthday,\n working_since=mary.working_since,\n salary=333333,\n department=ds.get_by_name('HR').id)\n self.assertNotEqual(mary.salary, 30000.0)",
"def change_employee(self,changed_identity):\r\n\r\n changed_emp = Employee(*changed_identity)\r\n changed_str = changed_emp.get_changes_registration_str()\r\n\r\n return_value = self.change_object_in_DB(\"employee\", changed_str, changed_emp._id) # Bring 'id' seperately, so next function can find line number\r\n return return_value",
"def update_employee(employee):\n employee_id = get_employee_input_int(\"Enter the employee id you want to update\")\n newGrade = get_employee_input_int(\"Enter the new grade for \")\n db.update_employee(employee_id, newGrade)\n print(employee.full_name + \"'s grade value has been updated to :-> \", newGrade)",
"def test_modify_employee_data(self):\n self._login(credentials=self.credentials)\n self.take_snapshot()\n self._click_link('employee_data')\n self._click_link('view_data') # reached at view employee data\n cred = {\"username\": \"+919999999904\", \"password\": \"r@123456789\"}\n self.selenium.find_element_by_link_text(cred['username']).click() # select employee by number\n _field = self.selenium.find_element_by_name('email') # select field\n self.take_snapshot()\n _field.clear() # clear field\n _field.send_keys(faker.company_email()) # enter random email\n self.take_snapshot()\n self.selenium.find_element_by_xpath(\"//input[contains(@type, 'submit')]\").click()\n self.selenium.find_element_by_link_text(cred['username']).click()\n self.take_snapshot()\n self.selenium.find_element_by_partial_link_text(\"Reset Password\").click()\n _password = self.selenium.find_element_by_name('password') # select password element\n self.take_snapshot()\n _password.clear() # clear password\n _password.send_keys(cred['password']) # enter password\n self.take_snapshot()\n self.selenium.find_element_by_xpath(\"//input[contains(@type, 'submit')]\").click()\n self.take_snapshot()\n self._logout()\n self._login(credentials=cred)\n self._logout()",
"def setEmployees(self, employees):\n self.employees = employees",
"def employee(self, employee: object):\n\n self._employee = employee",
"def onchange_employee(self):\n if self.employee_id:\n self.benefit_start = self.employee_id.benefit_start\n self.balance_benefit = self.employee_id.balance_benefit",
"def update(self, request, pk):\n serializer = data_serializers.UpdateEmployeeRequestSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n new_employee_entity = self.controller.update_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee_entity)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.ObjectEntityDoesNotExist\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def employees(self, employees: object):\n\n self._employees = employees",
"def change_salary(id=\"\"):\n emp = find_employee(id) # finds employee\n if emp:\n print(\"Employee found:\\n\", emp) # Displays employee found\n emp.salary = getPositiveFloat(\"Please enter new salary : \") # Sets new salary\n else:\n print(\"Employee not found...\") # In case employee not found",
"def make_salaried(self,salary,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"2\"\n print(\"{}{}\".format(name,\" was successfully changed to be a salaried employee\"))\n self.emp_dict[id][7] = salary\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()",
"def employee(self, employee: Employee):\n\n self._employee = employee",
"def onchange_employee(self):\n self.department_id = self.employee_id.department_id.id or False\n self.company_id = self.employee_id.company_id.id or False\n self.branch_id = self.employee_id.branch_id or False",
"def onchange_employee(self):\n cr, uid, context = self.env.args\n emp_id = self.employee_id.id\n if emp_id:\n #If employee is there fetch the related room and bed\n bed_id = self.env['beds.beds'].search([('employee_id','=',self.employee_id.id),('room_id.accommodation_id','=',context.get('accommodation_id'))])\n if not bed_id:\n emp_name = self.employee_id.name\n raise ValidationError('The Employee is not accommodated here!' + emp_name)\n self.bed_id = bed_id.id\n self.room_id = bed_id.room_id.id",
"def populate_employee_info(self):\n self.me_employee_gbox.setEnabled(True)\n self.me_delete_employee_btn.setEnabled(True)\n self.me_update_employee_btn.setEnabled(True)\n self.edit_employee = self.employee_list[self.me_employee_listview.selectedIndexes()[0].row()]\n self.me_id_lbl.setText(str(self.edit_employee.employee_id))\n self.me_name_field.setText(self.edit_employee.employee_name)\n self.me_password_field.setText(self.edit_employee.employee_password)\n self.me_role_cbox.setCurrentIndex(int(self.edit_employee.role))\n self.me_new_employee_b = False",
"def onchange_employee_id(self):\n if self.employee_id:\n self.job_id = self.employee_id.job_id.id"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
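The change_employee record above follows a select-then-update pattern guarded by commit/rollback. A minimal standalone sketch of that pattern, assuming a psycopg2-style connection object; the table and column names are carried over from the record and the trimmed column list is an assumption for illustration:

def update_employee_row(conn, employee_id, name, email):
    # Select first so a missing id fails loudly instead of silently updating nothing.
    cursor = conn.cursor()
    try:
        cursor.execute('select * from employee where employeeID=%s', (employee_id,))
        if cursor.rowcount == 0:
            raise Exception('no employee found with that id')
        cursor.execute('update employee set name=%s, email=%s where employeeID=%s',
                       (name, email, employee_id))
        conn.commit()
    except Exception:
        conn.rollback()  # undo the partial write before re-raising
        raise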
get all the projects of an employee. IMPORTANT: not all fields will be completed, only the fields in the project table and the activeYears field | def get_employeeProjects(self, id):
from Project import Project
cursor = self.dbconnect.get_cursor()
cursor.execute('select project from projectpromotor where employee=%s', (id,))
projectsId = list()
for row in cursor:
projectsId.append(row[0])
projects = list()
for projId in projectsId:
cursor.execute('select * from project where projectID=%s',
(projId,)) # returns exactly one row from the table
row = cursor.fetchone()
project = Project(row[0], row[1], row[2], row[3])
cursor.execute('select year from projectYearConnection where projectID=%s', (projId,))
years = list()
for row in cursor:
years.append(row[0])
project.activeYear = years
projects.append(project)
return projects | [
"def getProjects(self):\n getParams = {'active':1,'account__active':1}\n return self._getJSONResponse('project', getParams)",
"def get_projects():\n if current_user.get_id() is None:\n return\n with database.engine.begin() as connection:\n result = connection.execute(select(\n [models.projects.c.project_id, models.projects.c.name, models.projects.c.path, models.projects.c.creation_date, models.projects.c.user_id, func.count(models.objects.c.object_id).label('object_count')])\n .select_from(models.projects.outerjoin(models.objects))\n .where(and_(models.projects.c.active == True, models.projects.c.user_id == current_user.id))\n .group_by(models.projects.c.project_id)\n .order_by(models.projects.c.project_id))\n projects = [dict(row) for row in result]\n for project in projects:\n user = models.User.query.filter_by(\n id=project['user_id']).first()\n if user:\n project['email'] = user.email\n return projects",
"def get_projects(self):\n rps = self.start_date\n\n return Project.objects.filter(\n Q(active=True)\n & Q(\n Q(start_date__lte=rps)\n | Q(\n Q(start_date__gte=rps)\n & Q(start_date__lte=datetime.datetime.now().date())\n )\n | Q(start_date__isnull=True)\n )\n & Q(\n Q(end_date__gte=rps)\n | Q(end_date__isnull=True)\n )\n )",
"def get_projects(self):\n self.cur.execute('SELECT * FROM projects;')\n projects = [Projects(row) for row in self.cur.fetchall()]\n return projects",
"def findProject(self):\n\n # check that we actually have json\n if hasattr(cherrypy.request, 'json'):\n data = cherrypy.request.json\n else:\n data = dict()\n\n # TODO validate projectNumbers; verify projectNumbers is list of ints\n\n validNum = []\n result = []\n if 'projectNumbers' in data:\n # if not admin, find only authorized projects\n if cherrypy.session['role'] == 'admin':\n validNum = data['projectNumbers']\n else:\n for pNum in data['projectNumbers']:\n if pNum in cherrypy.session['projectNumbers']:\n validNum.append(pNum)\n\n for project in validNum:\n for res in self.colProjects.find({'projectNumber': project, 'status': 'active'}):\n res['_id'] = str(res['_id'])\n result.append(res)\n #~ return result\n else:\n if cherrypy.session['role'] != 'admin':\n validNum = cherrypy.session['projectNumbers']\n for project in validNum:\n for res in self.colProjects.find({'projectNumber': project, 'status': 'active'}):\n res['_id'] = str(res['_id'])\n result.append(res)\n #~ return result\n else: # is admin\n for res in self.colProjects.find({'status': 'active'}):\n res['_id'] = str(res['_id'])\n result.append(res)\n #~ return result\n\n for res in result:\n res = self.calculateBudget(res[\"projectNumber\"])\n return result",
"def _list_projects():\n client = faculty.client(\"project\")\n user_id = _get_authenticated_user_id()\n projects = client.list_accessible_by_user(user_id)\n return projects",
"def selectable_projects():\n\n db = current.db\n s3db = current.s3db\n\n # Lookup projects with provider self-registration\n ptable = s3db.project_project\n ttable = s3db.project_project_tag\n join = ttable.on((ttable.project_id == ptable.id) & \\\n (ttable.tag == \"APPLY\") & \\\n (ttable.value == \"Y\") & \\\n (ttable.deleted == False))\n query = (ptable.deleted == False)\n rows = db(query).select(ptable.id,\n ptable.name,\n join = join,\n )\n projects = {row.id: row.name for row in rows}\n return projects",
"def _get_open_projects_info():\n projects = Project.objects.filter(project_open=True).order_by(\"created_at\")\n projects_sum_hours = []\n for project in projects:\n time_entries_pro_project = TimeEntry.objects.filter(project=project)\n used_hours = _sum_hours(time_entries_pro_project)\n hours_percent = _calculate_hours_percent(used_hours, project.stimated_hours)\n projects_sum_hours.append(\n {\n \"hours_percent_number\": hours_percent,\n \"hours_percent\": f\"{hours_percent}%\",\n \"worked_hours\": used_hours,\n \"project\": project,\n }\n )\n return projects_sum_hours",
"def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]",
"def get_projects(session):\n cursuses = [1, 21] # cursus ids from which to get the projects\n project_names = []\n\n for cursus in cursuses:\n # Get all the projects from 1 cursus, very slow process because projects endpoint contains\n # a lot of information\n projects = get_all_pages(session, f'/cursus/{cursus}/projects', 100, {'filter[exam]': False})\n for project in projects:\n # Create dictionary containing project id and project name ans set in bigger dict\n project_names.append({'id': project['id'], 'name': project['name']})\n\n return project_names",
"def active_projects(self):\n return self.projects.filter(active=True)",
"def get_user_projects(self):\n return self.projects.all()",
"def projectData(self):\n\n # check that we actually have json\n if hasattr(cherrypy.request, 'json'):\n data = cherrypy.request.json\n else:\n raise cherrypy.HTTPError(400, 'No data was given')\n\n # prepare the sort, order, and page number\n sortBy = checkValidData('sortBy', data, str, default='projectNumber',\n optional=True)\n\n if sortBy not in ('projectNumber', 'sponsorName', 'projectName', 'membersEmails', 'defaultBudget'):\n raise cherrypy.HTTPError(\n 400, 'sortBy must be any of projectNumber, sponsorName, projectName, membersEmails, defaultBudget. Not %s'\n % sortBy)\n\n order = checkValidData('order', data, str, default='ascending',\n optional=True)\n\n if order not in ('ascending', 'descending'):\n raise cherrypy.HTTPError(\n 400, 'order must be ascending or descending. Not %s.' % order)\n\n direction = pm.ASCENDING if order == 'ascending' else pm.DESCENDING\n\n pageNumber = checkValidData('pageNumber', data, int, default=0,\n optional=True)\n\n if pageNumber < 0:\n raise cherrypy.HTTPError(\n 400, \"Invalid pageNumber format. \"\n \"Expected nonnegative integer. \"\n \"See: %s\" % pageNumber)\n\n pageSize = 10 # TODO stretch goal make this configurable\n\n myFilter = getProjectKeywords(data.get('keywordSearch', {}))\n #myFilter['status'] = 'current'\n\n # finds projects who are current only\n projectCursor = self.colProjects.find(myFilter).collation({ 'locale': 'en' }).sort(sortBy, direction)\n\n retProjects = []\n for proj in projectCursor[pageSize*pageNumber: pageSize*(pageNumber+1)]:\n myProj = dict()\n myProj['_id'] = str(proj['_id'])\n for key in ('sponsorName', 'projectName', 'membersEmails', 'defaultBudget'):\n myProj[key] = proj.get(key, '')\n\n myProj['projectNumber'] = proj.get('projectNumber', '')\n\n # if myUser['role'] != 'admin':\n # for key in ('projectNumbers', 'course'):\n # myUser[key] = user[key]\n\n retProjects.append(myProj)\n\n\n return retProjects",
"def get_active_and_main_fundraiser(self, queryset=None):\n if queryset is None:\n queryset = super(ProjectManager, self).get_queryset()\n active_projects = queryset.filter(\n project_status=Project.ACTIVE, org_name__in=['SSF', 'MAINSSF']\n ).order_by('end_date')\n return active_projects",
"def _getsubprojects(self):\r\n result = self._session.execute(\"query -t project \\\"recursive_is_member_of('%s', none)\\\" -u -f \\\"%%objectname\\\"\" % self.objectname, ObjectListResult(self._session))\r\n return result.output",
"def getProjects(self , teamindex = 0):\r\n if self.userdata == {}:\r\n self.reloadUserdata()\r\n projects = self.userdata['user']['teams'][teamindex]['projects']\r\n return projects",
"def test_get_projects_expanded(self):\n pass",
"def _get_projects(current_project_name):\n projects = []\n\n unique_project_changes = Change.objects.order_by().values(\n 'project_name').distinct()\n for change in unique_project_changes:\n projects.append(change['project_name'])\n\n # sort alphabetically\n projects.sort()\n\n # insert 'all' option as it should be present always\n projects.insert(0, PROJECT_ALL)\n\n # if current_project_name is valid, make it the first element in list so\n # that it shows up as selected in project choice drop down\n if current_project_name != PROJECT_ALL and current_project_name in projects:\n projects.remove(current_project_name)\n projects.insert(0, current_project_name)\n elif current_project_name != PROJECT_ALL:\n logging.error(\"Currently selected project %s not found in any changes.\"\n \" Removing from list.\", current_project_name)\n logging.debug(\"Returning list of projects: %r\", projects)\n return projects",
"def get_projects(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/projects\").json()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
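The get_employeeProjects record above issues one extra query per project to collect the active years (an N+1 query pattern). As a hedged alternative, the per-project year lookups could be collapsed into a single join; the sketch below only assumes the table and column names that appear in the record, and the remaining project fields would still be read as before:

def get_employee_project_years(cursor, employee_id):
    # One query instead of one per project; returns {projectID: [years]}.
    cursor.execute(
        'select p.projectID, y.year '
        'from projectpromotor pp '
        'join project p on p.projectID = pp.project '
        'left join projectYearConnection y on y.projectID = p.projectID '
        'where pp.employee = %s', (employee_id,))
    years_by_project = {}
    for project_id, year in cursor:
        years_by_project.setdefault(project_id, [])
        if year is not None:
            years_by_project[project_id].append(year)
    return years_by_project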
The Simple Moving Average (SMA) is calculated by adding the price of an instrument over a number of time periods and then dividing the sum by the number of time periods. The SMA is basically the average price of the given time period, with equal weighting given to the price of each period. Simple Moving Average: SMA = ( Sum ( Price, n ) ) / n | def SimpleMovingAverage(self, timeperiod = 14):
return ta.SMA(self.data.close,timeperiod) | [
"def SMA(values, n):\n return pd.Series(values).rolling(n).mean()",
"def SMA(df, time_period=30):\n close = df['close']\n return talib.SMA(close, timeperiod=time_period)",
"def sma(df_prices, i_period):\r\n\r\n i_len = len(df_prices)\r\n assert i_len >= i_period\r\n\r\n df_sma = pd.rolling_mean(df_prices,i_period)\r\n ## copy value for datas before i_period ( wait for enough data )\r\n df_sma[:i_period -1] = df_prices[:i_period - 1]\r\n df_sma.name = 'sma'+str(i_period)\r\n return df_sma",
"def get_SMA(close_data, time_period):\n \n # List to store moving average results\n SMA = list(range(0, len(close_data) - time_period))\n \n # Compute moving average\n for ii in range(len(SMA)):\n # Previous days index\n index = range(ii, ii + time_period)\n \n # Get data for previous days\n prev_days = close_data.iloc[index]\n \n # Sum previous days\n summation = np.sum(prev_days)\n \n # Get average\n avg = summation/time_period\n \n # Save results to list\n SMA[ii] = avg\n \n # Define column label\n label = f\"{time_period}-SMA\"\n \n # Get corresponding dates for moving_avg\n dates = close_data.index[time_period:]\n \n # Convert list into Pandas Series\n SMA = pd.Series(SMA, name = label, index = dates)\n \n return SMA",
"def sma(matrix, interval):\n\n # declare empty SMA numpy array\n s = np.zeros((matrix.shape[0] - interval))\n\n # calculate the value of each point in the Simple Moving Average array\n for t in range(0, s.shape[0]):\n s[t] = np.sum(matrix[t:t + interval])/interval\n\n return s",
"def test_sma_method(data):\n arr = data[\"close\"].to_numpy(copy=True).astype(float)\n ti_sma = np.append([np.nan for i in range(9)], ti.sma(arr, period=10))\n pd_sma = pd.Series(arr).rolling(10).mean().to_numpy()\n assert np.allclose(ti_sma, pd_sma, equal_nan=True)",
"def SMA(self, values, n):\n return pd.Series(values).rolling(n).mean()",
"def sma(self) -> float:\n return self._sma",
"def moving_average (s, n) :\n if n == 1 :\n yield from s \n else :\n m = float (n)\n s = iter (s)\n w = DL_Ring (next (s) / m for k in range (n))\n ma = sum (w.values ())\n yield ma\n for x in s :\n x_m = x / m\n ma -= w.pop_front ()\n ma += x_m\n w.append (x_m)\n yield ma",
"def moving_average_forecast(series, window_size):\n mov = np.cumsum(series)\n mov[window_size:] = mov[window_size:] - mov[:-window_size]\n return mov[window_size - 1:-1] / window_size",
"def moving_average(series, n):\r\n return np.average(series[-n:])",
"def moving_average(a, n=3) :\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n",
"def get_EMA(close_data, time_period):\n \n # List to store moving average results\n EMA = list(range(0, len(close_data) - time_period))\n \n # Calculate SMA to use as the first EMA\n initial_EMA = get_SMA(close_data, time_period)[0]\n \n # Calculate initial weight\n k = 2.0 / (time_period + 1)\n\n # Compute EMA\n for ii in range(len(EMA)):\n # Set index\n index = time_period + ii\n \n # Get current Close price\n close_temp = close_data[index]\n \n # Compute current EMA\n if ii == 0:\n EMA_temp = (close_temp - initial_EMA)*k + initial_EMA\n else:\n EMA_temp = (close_temp - EMA[ii-1])*k + EMA[ii-1]\n\n\n # Save results to list\n EMA[ii] = EMA_temp\n \n # Define column label\n label = f\"{time_period}-EMA\"\n \n # Get corresponding dates for moving_avg\n dates = close_data.index[time_period:]\n \n # Convert list into Pandas Series\n EMA = pd.Series(EMA, name = label, index = dates)\n \n return EMA",
"def ma(df, close_price_col_name=\"Close\", ma_col_name=\"MA\"):\r\n\r\n\t# Check N positive integer\r\n\twhile True:\r\n\r\n\t\tN = input(\"Please input period for moving average model (a positive integer (recommend: 10, 20, 50, 100, or 200 )): \")\r\n\r\n\t\ttry:\r\n\t\t\tif int(N) > 0:\r\n\t\t\t\tbreak\r\n\r\n\t\t\telif \".\" in N:\r\n\t\t\t\tprint(\"Please enter a positive integer, not a float \")\r\n\t\t\t\tcontinue\r\n\r\n\t\t\telif int(N) < 0:\r\n\t\t\t\tprint(\"Please enter a positive integer, not a negative one \")\r\n\t\t\t\tcontinue\r\n\r\n\t\texcept ValueError:\r\n\t\t\tprint(\"Please input a positive integer, not a string\")\r\n\t\t\tcontinue\r\n\r\n\t# Add column to store value of MA\r\n\tdf[ma_col_name] = df[close_price_col_name].rolling(window=int(N), min_periods=0).mean()\r\n\r\n\t# Plot\r\n\tplt.plot(df[close_price_col_name], label=\"Closing price\")\r\n\tplt.plot(df[ma_col_name], label=\"Moving average \" + N + \" days\")\r\n\tplt.title(\"Visualization of Moving Average \" + N + \" days\")\r\n\tplt.xlabel(\"Date\")\r\n\tplt.ylabel(\"Closing price\")\r\n\tplt.legend(loc='upper left')\r\n\tplt.show()\r\n\r\n\tdel df[ma_col_name] # delete the MA column for re-graphing\r",
"def moving_average(data, window_size=100): #used this approach https://stackoverflow.com/questions/11352047/finding-moving-average-from-data-points-in-python \n cumsum_vec = np.cumsum(np.insert(data, 0, 0)) \n ma_vec = (cumsum_vec[window_size:] - cumsum_vec[:-window_size]) / window_size\n return ma_vec",
"def find_sma_em(array, strides=3, window_length=4):\n try:\n # Checking if the Given Vector is of Type List\n if not issubclass(type(array), list):\n # If not, an error is raised\n raise AttributeError(\"Array Should be Given as a List\")\n\n # Creating a map object with values either true (if the corresponding item is either integer or float) or false\n type_check = map(lambda arg: issubclass(type(arg), int) or issubclass(type(arg), float), array)\n\n # Checking if all the elements of the list is of type integer or float\n if not all(type_check):\n # If not, an error is raised\n raise AttributeError(\"The Array Elements Should be Integer or Float\")\n\n if (not isinstance(window_length, int)) or window_length < 2:\n raise AttributeError(\"window length should be of type integer and greater than 1\")\n\n if (not isinstance(strides, int)) and (not isinstance(strides, list)):\n raise AttributeError(\"Stride Should be given as int or list \")\n\n # Creating a Dataframe from the given array\n given_array = pd.DataFrame(array)\n\n # Calculating the simple moving average\n given_array['SMA'] = given_array.iloc[:, 0].rolling(window=window_length).mean()\n\n if isinstance(strides, int):\n created_strides = [strides] * len(given_array)\n else:\n created_strides = strides\n\n # Creating the list of indices for which the moving average needs to be found\n # With first value equal to window_length -1\n indices = [window_length-1]\n\n for stride in created_strides:\n # The next index would be previous index + stride\n new_inde = indices[-1] + stride\n if new_inde > len(given_array) - 1:\n # If the new index exceeds the length of data, then the loop is broken\n break\n indices.append(new_inde)\n\n given_array.loc[given_array.index.isin(indices), 'SMA_WITH_STRIDE'] = \\\n given_array.iloc[:, 0].rolling(window=window_length).mean()\n\n # Calculating the expanding window mean\n given_array['EM'] = given_array.iloc[:, 0].expanding().mean()\n\n LOGGER.info(\"The SMA and EM for the given array are: \\n%s\\n\", given_array.iloc[:25, :])\n except AttributeError as err:\n LOGGER.error(\"AttributeError: %s\", err)",
"def ema(s, n):\r\n\r\n ema = []\r\n j = 1\r\n\r\n #get n sma first and calculate the next n period ema\r\n sma = sum(s[:n]) / n\r\n multiplier = 2 / float(1 + n)\r\n ema.append(sma)\r\n\r\n #EMA(current) = ( (Price(current) - EMA(prev) ) x Multiplier) + EMA(prev)\r\n ema.append(( (s[n] - sma) * multiplier) + sma)\r\n\r\n #now calculate the rest of the values\r\n for i in s[n+1:]:\r\n tmp = ( (i - ema[j]) * multiplier) + ema[j]\r\n j = j + 1\r\n ema.append(tmp)\r\n\r\n return ema",
"def Moving_average(Signal=[],windowsize=3):\n smooth = np.cumsum(Signal, dtype=float)\n smooth[windowsize:] = smooth[windowsize:] - smooth[:-windowsize]\n smooth = smooth[windowsize - 1:] / windowsize\n smoothdata = smooth.tolist()\n #使平滑後的數據長度與初始數據長度能一致\n for n in range( windowsize-1 ):\n smoothdata.insert(0,smooth[0])\n \n return smoothdata",
"def moving_average(audio, N):\n\n cumsum, moving_aves = [0], []\n audio_abs = abs(audio)\n for i, x in enumerate(audio_abs, 1):\n cumsum.append(cumsum[i-1] + x)\n if i>=N:\n moving_ave = (cumsum[i] - cumsum[i-N])/N\n moving_aves.append(moving_ave)\n return(moving_aves)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
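The record above wraps TA-Lib's ta.SMA; the definition quoted in the query, SMA = Sum(Price, n) / n, can also be checked with a few lines of plain Python. The prices below are made-up illustration values:

prices = [10.0, 11.0, 12.0, 13.0, 14.0, 15.0]  # illustrative closing prices
n = 3  # number of time periods

# SMA = Sum(Price, n) / n over a sliding window of the last n prices.
sma = [sum(prices[i - n + 1:i + 1]) / n for i in range(n - 1, len(prices))]
print(sma)  # [11.0, 12.0, 13.0, 14.0]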
Average True Range is a lagging indicator, used to provide insights into volatility. | def AverageTrueRange(self, timeperiod = 14):
return ta.ATR(self.data.high, self.data.low, self.data.close, timeperiod) | [
"def compute_average_true_ranges(context):\n if context.is_debug:\n start_time = time()\n \n rolling_window = 21\n moving_average = 20\n \n for market in context.prices.items:\n context.average_true_range[market] = ATR(\n context.prices[market].high[-rolling_window:],\n context.prices[market].low[-rolling_window:],\n context.prices[market].close[-rolling_window:],\n timeperiod=moving_average\n )[-1]\n \n if context.is_test:\n assert(len(context.average_true_range) > 0)\n \n if context.is_debug:\n time_taken = (time() - start_time) * 1000\n log.debug('Executed in %f ms.' % time_taken)\n assert(time_taken < 1024)",
"def lag_average(self, fmin, fmax):\n avgcross = self.freq_average(fmin, fmax)\n lag = np.angle(avgcross) / (2 * np.pi * np.mean([fmin, fmax]))\n return lag",
"def should_average(self):\n return self._should_average",
"def get_averaging_status(self):\n # 0 - Off, 1 - On\n averaging = self.get_num(self.query(\"AVERO?\"))\n # bool(0) = False, bool(1) = True\n return bool(averaging)",
"def conditional_mean(self, gp):\n raise NotImplementedError",
"def indicator_logic(self, candle):\n # Initialize variables\n sma, upper, lower = 2, -1.0, -1.0 # 'sma' = 2 is clever way to generate 'a favor' e 'contra'\n\n # Append close to moving average\n self.ma.append(candle.close[self.up])\n\n # Check if there are enough candles to calculate moving average\n if len(self.ma) == self.period:\n\n # Initialize upper and lower values for when there is a valid moving average\n upper, lower = 0.0, 0.0\n\n # Calculates moving average\n avg = sum(self.ma) / self.period\n\n # Tells if current close is above moving average\n sma = 1 if candle.close[self.up] > avg else 0\n\n # Calculates standard deviation\n std = pstdev(self.ma)\n\n # Calculates difference between current candle and moving average\n diff = candle.close[self.up] - avg\n\n # Transform difference to standard deviations\n if diff > 0 and std != 0:\n # Value of above\n upper = diff / std\n elif diff < 0 and std != 0:\n # Value if below\n lower = -diff / std\n\n # Returns values\n return sma, upper, lower",
"def is_increasing_trend(self):\n\n try:\n return self.history[-3].value < \\\n self.history[-2].value < self.history[-1].value\n except IndexError:\n return False",
"def get_avg(self):\n return self.sum / max(len(self.window), 1)",
"def atr(close: pd.Series, high: pd.Series, low: pd.Series, periods: int = 14) -> pd.Series:\n close.name = \"CLOSE\"\n high.name = \"HIGH\"\n low.name = \"LOW\"\n data = pd.concat([close, high, low], axis=1)\n\n data[\"CLOSE PREVIOUS\"] = data[\"CLOSE\"].shift(1)\n true_range = pd.concat([data[\"HIGH\"] - data[\"LOW\"],\n abs(data[\"HIGH\"] - data[\"CLOSE PREVIOUS\"]),\n abs(data[\"LOW\"] - data[\"CLOSE PREVIOUS\"])\n ], axis=1).max(axis=1)\n average_true_range = true_range.rolling(periods).mean()\n\n average_true_range.name = \"ATR\"\n return average_true_range",
"def _get_average(self):\r\n if self.at_bats == 0:\r\n return 0.0\r\n\r\n return old_div(float(self.singles + self.doubles +\r\n self.triples + self.home_runs), self.at_bats)",
"def __compute_rolling_avg(self, dataframe, rolling_range=None):\n\n if not rolling_range:\n rolling_range = self.trials / 10\n\n rolling_avg = pd.stats.moments.rolling_mean(\n dataframe,\n rolling_range,\n )\n\n return rolling_avg",
"def test_running_average():\n avg = OnlineAverage(rate=None)\n values = np.random.normal(size=15)\n for subset in [values[:3], values[3:9], values[9:14], values[14:]]:\n avg.update(subset)\n assert np.allclose(avg.value, np.mean(values))",
"def og_range(self):\n if self.exceptions:\n return self.exceptions\n else:\n return (float(self.stats['og']['low']), float(self.stats['og']['high']))",
"def average(self,start_window, end_window):\n query = f\"select avg(age) from `{self.table_id}` where timestamp between {start_window} and {end_window}\"\n query_job = self.client.query(query)\n return query_job.result",
"def _affects_average_changed(self):\r\n self.at_bats += 1",
"def avgBaseline():\n return aBaseline",
"def mean_error_rate(y_true, y_interval):\n _check_interval_array(y_interval)\n\n wrong_intervals = ((y_true < y_interval[:, 0]) | (y_true > y_interval[:, 1])).sum()\n\n return wrong_intervals / y_true.shape[0]",
"def average(self):\n return (self.current + self.last) / 2.0",
"def get_average_gain(self):\n return self.average_gain"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
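ATR is a moving average of the true range, where the true range of a bar is the greatest of high − low, |high − previous close| and |low − previous close|. A minimal pandas sketch of that calculation follows; note that TA-Lib's ATR (used in the record above) applies Wilder's smoothing rather than the plain rolling mean used here, so the values will differ slightly:

import pandas as pd

def average_true_range(high: pd.Series, low: pd.Series, close: pd.Series, periods: int = 14) -> pd.Series:
    prev_close = close.shift(1)
    # True range: the widest of the three candidate ranges for each bar.
    true_range = pd.concat([high - low,
                            (high - prev_close).abs(),
                            (low - prev_close).abs()], axis=1).max(axis=1)
    return true_range.rolling(periods).mean()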
Starting at the current column header, shift to the right col_shift times | def get_header(col_current, col_shift):
header = col_current
for i in range(col_shift):
header = header.right
return header | [
"def _pad_columns(self):\n previous_row = self.previous\n\n if previous_row is None:\n # Start of layout; don't need to pad columns\n return\n\n while len(previous_row.end) < len(self.start):\n previous_row.end.append(set())\n\n while len(previous_row.end) > len(self.start):\n self.start.append(set())\n\n while not previous_row.end[-1] and not self.start[-1]:\n del previous_row.end[-1], self.start[-1]",
"def shift_right(self):\n self.pointer = (self.pointer + 1) % len(self.data)",
"def new_column( self, delta = 1, ):\n self.ix_row = 0\n self.ix_col += delta",
"def move_col_front(df):\n cols = list(df)\n cols2 = list(df)\n cols.insert(0, cols.pop(cols.index(len(df.columns)-1)))\n df = df.loc[:, cols]\n df.columns = cols2\n return df",
"def moveUpRight(self):\n if self.curr_col < self.num_col - 1:\n self.curr_col += 1\n if self.curr_row > 0:\n self.curr_row -= 1",
"def col_data_mover_at(row, col):\n if col == 0:\n return NAME_SCHEME[\"memory move\"].format(prefix=f\"l{row}\")\n else:\n return NAME_SCHEME[\"register move right\"].format(pe=f\"pe_{row}_{col - 1}\")",
"def _reset_header(self):\n new_header = []\n for col_name in self.header:\n is_left = self.left_cols.get(col_name)\n if is_left:\n new_header.append(col_name)\n self.header = new_header",
"def tab(self):\r\n for stop in sorted(self.tabstops):\r\n if self.cursor.x < stop:\r\n column = stop\r\n break\r\n else:\r\n column = self.columns - 1\r\n\r\n self.cursor.x = column",
"def mirrorHoriz():",
"def table_scroll_cols(self, n):\n self._table_first_col += n\n\n if self._table_first_col < 0:\n self._table_first_col = 0\n\n if self._table_first_col >= self._cur_table[0]:\n self._table_first_col = self._cur_table[0] -1",
"def matrix_left_move_column(k, matrix):\n pass",
"def shift(row):\r\n new_lst = []\r\n for i in range(4):\r\n if row[i] != 0:\r\n new_lst.append(row[i])\r\n if len(new_lst) < len(row):\r\n new_lst.extend([0] * (len(row) - len(new_lst)))\r\n row = new_lst\r\n\r\n return row",
"def _modify_columns(self, cols, X, y=None):",
"def _col_to_zero (self,ws,col):\n\n c=1 # 1-based line counter \n for each in ws[col]:\n if c != 1: #IGNORE HEADER\n #print (str(c)+': '+each.value)\n each.value=0 # None doesn't work\n c+=1\n return c",
"def _drag_col(self, event):\n x = self._dx + event.x # get dragged column new left x coordinate\n self._visual_drag.place_configure(x=x) # update column preview position\n # if one border of the dragged column is beyon the middle of the\n # neighboring column, swap them\n if (self._dragged_col_neighbor_widths[0] is not None and\n x < self._dragged_col_x - self._dragged_col_neighbor_widths[0] / 2):\n self._swap_columns('left')\n elif (self._dragged_col_neighbor_widths[1] is not None and\n x > self._dragged_col_x + self._dragged_col_neighbor_widths[1] / 2):\n self._swap_columns('right')\n # horizontal scrolling if the cursor reaches the side of the table\n if x < 0 and self.xview()[0] > 0:\n # scroll left and update dragged column x coordinate\n self.xview_scroll(-10, 'units')\n self._dragged_col_x += 10\n elif x + self._dragged_col_width / 2 > self.winfo_width() and self.xview()[1] < 1:\n # scroll right and update dragged column x coordinate\n self.xview_scroll(10, 'units')\n self._dragged_col_x -= 10",
"def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)",
"def _swapcolumns(self):\n return self.reindex_axis([self.columns[1], self.columns[0]], axis=1)",
"def __rshift__(self, other: Any) -> ColumnOperators:\n return self.operate(rshift, other)",
"def moveUpLeft(self):\n if self.curr_col > 0:\n self.curr_col -= 1\n if self.curr_row > 0:\n self.curr_row -= 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
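get_header above walks a circular doubly linked list of column headers. A minimal sketch of such a ring, with the attribute names taken from what the record's code touches (left/right), reproduces the same right-shift:

class Header:
    def __init__(self, name):
        self.name = name
        self.left = self.right = self  # a lone node points to itself

def link_right(a, b):
    # Insert b immediately to the right of a in the ring.
    b.right = a.right
    b.left = a
    a.right.left = b
    a.right = b

a, b, c = Header('A'), Header('B'), Header('C')
link_right(a, b)
link_right(b, c)

h = a
for _ in range(2):  # shift right twice, as get_header(a, 2) would
    h = h.right
print(h.name)       # 'C'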
Remove the specified column header from the header chain. All rows that appear in this column are also removed | def remove_col(self, col_header):
# Remove the column header from the header chain
col_header.right.left = col_header.left
col_header.left.right = col_header.right
# Loop down through the column and remove the rows
cell = col_header.down
while cell != col_header:
row_cell = cell.right
# Move through all cells in this row and update their up/down links
while row_cell != cell:
row_cell.down.up = row_cell.up
row_cell.up.down = row_cell.down
row_cell.header.sum -= 1
# Move on to the next cell in the row
row_cell = row_cell.right
# Move on to the next row
cell = cell.down | [
"def unremove_col(self, col_header):\n # Add the column head back into the chain\n col_header.right.left = col_header\n col_header.left.right = col_header\n # Loop up through the column and add the rows back in\n # Doing this in exactly the reverse order of the removing ensures that we return\n # to the state we were in before the removal\n cell = col_header.up\n while cell != col_header:\n row_cell = cell.left\n # Move through all cells in this row and update their up/down links\n while row_cell != cell:\n row_cell.down.up = row_cell\n row_cell.up.down = row_cell\n row_cell.header.sum += 1\n # Move on to the next cell in the row\n row_cell = row_cell.left\n # Move on to the next row\n cell = cell.up",
"def rmcolumn(input, output, columns):\n from astropy.io import fits\n\n with fits.open(input) as hdulist:\n for hdu in hdulist:\n hdu.header.pop(columns) \n hdu.data.del_col(columns) \n hdulist.writeto(output, overwrite=True)",
"def remove(self, header):\n key = header.lower()\n if key not in self._set:\n raise IndexError(header)\n self._set.remove(key)\n for idx, key in enumerate(self._headers):\n if key.lower() == header:\n del self._headers[idx]\n break\n if self.on_update is not None:\n self.on_update(self)",
"def remove_columns(tx, header, columns_to_remove):\n print(\"\\nRemove columns...\")\n num_removed = 0\n for col in columns_to_remove:\n tx = np.delete(tx, col - num_removed, 1)\n header = np.delete(header, col - num_removed + 2)\n num_removed += 1\n print(\"\\n... finished.\")\n return tx, header",
"def header_clean_row(row_of_data):\n header = row_of_data.get('header')[1]\n z = list(set(remove_filler_words([header])))\n return z",
"def _remove_column(self, column: str) -> None:\n dtype, loc, order = self._column_info.pop(column).values\n self._data[dtype] = np.delete(self._data[dtype], loc, axis=1)\n if self._data[dtype].shape[1] == 0:\n del self._data[dtype]\n\n for col, col_obj in self._column_info.items():\n if col_obj.dtype == dtype and col_obj.loc > loc:\n col_obj.loc -= 1",
"def delColumn(self,column):\n data = self.data\n for rowData in data.values():\n if column in rowData:\n del rowData[column]\n self.hasChanged = True",
"def removeColumn( self, columnName ):\n column = [a for a in self.columns if a.name==columnName]\n if not column: raise ValueError(\"column '%s' isn't present in the table!\" % columnName)\n if len(column) > 1: raise Exception(\"Column named '%s' is not unique!\" % columnName)\n index = self.columns.index( column[0] )\n self.columns.pop( index )\n [row.pop(index) for row in self.data]\n for idx, col in enumerate(self.columns): col.index = idx",
"def removeHeader(self, name):\n\n name = name.lower()\n if name in self._raw_headers:\n del self._raw_headers[name]\n del self._headers[name]",
"def remove_header(self, key):\n del self._headers[key]",
"def remove_header( self, *names ):\n for name in names:\n del self[ name.strip() ]",
"def deleteHeader(header, fields, delimiter=default_delimiter):\n for f in fields: del header[f]\n\n return header",
"def _reset_header(self):\n new_header = []\n for col_name in self.header:\n is_left = self.left_cols.get(col_name)\n if is_left:\n new_header.append(col_name)\n self.header = new_header",
"def remove(self, column):\n self._columns.remove(column)",
"def cleanup_headers(data_source, pandas_dataframe):\n drop_headers(data_source, pandas_dataframe)\n rename_headers(data_source, pandas_dataframe)",
"def remove_headers(headers, name):\n i = 0\n name = name.lower()\n for j in range(len(headers)):\n if headers[j][0].lower() != name:\n if i != j:\n headers[i] = headers[j]\n i += 1\n del headers[i:]\n return headers",
"def clear_header(self, name):\r\n if name in self._headers:\r\n del self._headers[name]",
"def drop_headers(data_source, pandas_dataframe):\n for column_name in pandas_dataframe.columns:\n if column_name in HEADERS_CHANGE[data_source]['drop_columns']:\n pandas_dataframe.drop(column_name, axis=1, inplace=True)\n return pandas_dataframe",
"def update_headers(headers: List[str]) -> List[str]:\n print(f\"Original headers: {headers}\")\n new_headers = []\n for i, header in enumerate(headers):\n if header in REMOVE_HEADERS:\n # If the column must be removed, store its index tofilter it out in entries' rows\n remove_headers_indexes.append(i)\n else:\n # Otherwise simply keep it\n new_headers.append(header)\n print(f'New headers: {new_headers}')\n return new_headers"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
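remove_col above is the "cover" step of a Dancing Links style structure: every cell sits in two circular doubly linked lists (its row and its column), each cell keeps a back-pointer to its column header, and each header tracks how many cells hang below it. A minimal sketch of the node shapes the code assumes, with names taken from the attributes it touches (left, right, up, down, header, sum):

class ColumnHeader:
    def __init__(self, name):
        self.name = name
        self.sum = 0                   # number of cells in this column
        self.left = self.right = self  # header ring
        self.up = self.down = self     # column list

class Cell:
    def __init__(self, header):
        self.header = header           # back-pointer used to fix sums
        self.left = self.right = self  # row ring
        self.up = self.down = self     # column ring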
Adds the specified column header back into the header chain. Also adds back all rows that removing this column took out | def unremove_col(self, col_header):
# Add the column head back into the chain
col_header.right.left = col_header
col_header.left.right = col_header
# Loop up through the column and add the rows back in
# Doing this in exactly the reverse order of the removing ensures that we return
# to the state we were in before the removal
cell = col_header.up
while cell != col_header:
row_cell = cell.left
# Move through all cells in this row and update their up/down links
while row_cell != cell:
row_cell.down.up = row_cell
row_cell.up.down = row_cell
row_cell.header.sum += 1
# Move on to the next cell in the row
row_cell = row_cell.left
# Move on to the next row
cell = cell.up | [
"def remove_col(self, col_header):\n # Remove the column header from the header chain\n col_header.right.left = col_header.left\n col_header.left.right = col_header.right\n # Loop down through the column and remove the rows\n cell = col_header.down\n while cell != col_header:\n row_cell = cell.right\n # Move through all cells in this row and update their up/down links\n while row_cell != cell:\n row_cell.down.up = row_cell.up\n row_cell.up.down = row_cell.down\n row_cell.header.sum -= 1\n # Move on to the next cell in the row\n row_cell = row_cell.right\n # Move on to the next row\n cell = cell.down",
"def _reset_header(self):\n new_header = []\n for col_name in self.header:\n is_left = self.left_cols.get(col_name)\n if is_left:\n new_header.append(col_name)\n self.header = new_header",
"def add_header(self, *column_headers):\n raise NotImplementedError",
"def update_headers(headers: List[str]) -> List[str]:\n print(f\"Original headers: {headers}\")\n new_headers = []\n for i, header in enumerate(headers):\n if header in REMOVE_HEADERS:\n # If the column must be removed, store its index tofilter it out in entries' rows\n remove_headers_indexes.append(i)\n else:\n # Otherwise simply keep it\n new_headers.append(header)\n print(f'New headers: {new_headers}')\n return new_headers",
"def rmcolumn(input, output, columns):\n from astropy.io import fits\n\n with fits.open(input) as hdulist:\n for hdu in hdulist:\n hdu.header.pop(columns) \n hdu.data.del_col(columns) \n hdulist.writeto(output, overwrite=True)",
"def add_header(self, *column_headers):\n header = \"<tr>\"\n header += \" \".join(f\"<th>{header}</th> \" for header in column_headers)\n header += \"</tr>\\n\"\n self.result += header",
"def remove_columns(tx, header, columns_to_remove):\n print(\"\\nRemove columns...\")\n num_removed = 0\n for col in columns_to_remove:\n tx = np.delete(tx, col - num_removed, 1)\n header = np.delete(header, col - num_removed + 2)\n num_removed += 1\n print(\"\\n... finished.\")\n return tx, header",
"def headers_processor(headers):\n def apply_headers(row_set, row):\n _row = []\n pairs = izip_longest(row, headers)\n for i, (cell, header) in enumerate(pairs):\n if cell is None:\n cell = Cell(None)\n cell.column = header\n if not cell.column:\n cell.column = \"column_%d\" % i\n cell.column_autogenerated = True\n _row.append(cell)\n return _row\n return apply_headers",
"def set_column_headers(self, headers):\n if isinstance(self.columns.idx[0], int):\n self.data = [sorted(headers)] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment\n\n elif isinstance(self.columns.idx[0], str):\n datum = {}\n for i, key in enumerate(self.columns.idx):\n datum.update({key: headers[i]})\n self.data = [datum] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment",
"def remove(self, header):\n key = header.lower()\n if key not in self._set:\n raise IndexError(header)\n self._set.remove(key)\n for idx, key in enumerate(self._headers):\n if key.lower() == header:\n del self._headers[idx]\n break\n if self.on_update is not None:\n self.on_update(self)",
"def addColumn(self, id, header = id):",
"def add_headers(dataset, headers):\r\n dataset.columns = headers\r\n return dataset",
"def header_data_columns(head_line, data_cols, header):\n\n colnames = head_line.split(\",\")\n\n # Remove triling blancks and end of lines\n colnames = [x.strip() for x in colnames]\n\n # Difference between columns in the header and in the data\n diff = len(data_cols) - len(colnames)\n\n if diff > 0:\n # Add dum headers\n dums = \"\"\n for idiff in range(diff):\n dums = dums + \",dum\" + str(idiff)\n\n new_head = str(head_line.rstrip()) + dums + \" \\n\"\n header.append(new_head)\n\n elif diff < 0:\n sys.exit(\n \"STOP novonix_clean.header_data_columns \\n\"\n + \"REASON less data columns than header names \\n\"\n )\n else:\n header.append(head_line)\n\n return",
"def infer_header(self):\n\n try:\n # if we think we already have a header, make sure it makes sense\n self.validate_header()\n\n # if we didn't have one or it was wrong, try to read from file\n if not self.header:\n self.infer_header_row()\n\n self.clean_header()\n except:\n pass\n\n # if we didn't find a header row, then make one up\n if not self.header:\n self.header_inferred = False\n self.header = ['attr%d' % i for i in xrange(len(self.types))]\n\n # ensure proper length by manufacturing extra header columns\n if len(self.header) < len(self.types):\n for i in xrange(len(self.types) - len(self.header)):\n self.header.append('attr%d' % i)\n # TODO: Why is self.types not appended to?\n\n # trim extra columns\n self.header = self.header[:len(self.types)]",
"def header_clean_row(row_of_data):\n header = row_of_data.get('header')[1]\n z = list(set(remove_filler_words([header])))\n return z",
"def add_header_row(self, header):\n\t\tself.add_row(header, True)",
"def edit_header(self, header):\n super().edit_header(header)\n self.z.edit_header(header)",
"def _duplicate_headers(headers, columns):\n for side, header in headers.items():\n duped = tuple([header for _ in range(columns)])\n headers[side] = np.concatenate(duped, axis=1)\n logger.debug(\"side: %s header.shape: %s\", side, header.shape)\n return headers",
"def _remove_column(self, column: str) -> None:\n dtype, loc, order = self._column_info.pop(column).values\n self._data[dtype] = np.delete(self._data[dtype], loc, axis=1)\n if self._data[dtype].shape[1] == 0:\n del self._data[dtype]\n\n for col, col_obj in self._column_info.items():\n if col_obj.dtype == dtype and col_obj.loc > loc:\n col_obj.loc -= 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
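Because unremove_col undoes remove_col in exactly the reverse order, covering a column and immediately uncovering it must leave the structure as it was. A hedged round-trip check; the matrix object, its root attribute and the method names are taken from these records, everything else is an assumption:

def column_sums(matrix):
    sums, col = [], matrix.root.right
    while col != matrix.root:
        sums.append(col.sum)
        col = col.right
    return sums

def check_cover_roundtrip(matrix, col_header):
    before = column_sums(matrix)
    matrix.remove_col(col_header)
    matrix.unremove_col(col_header)
    assert column_sums(matrix) == before  # structure fully restored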
Find the column that has the minimum number of cells in it, to minimize branching. Returning a column with 0 cells in it is OK; this gets dealt with in the solving loop | def get_minimum_column(self):
min_col = self.root.right
current_col = min_col.right
while current_col != self.root:
if current_col.sum < min_col.sum:
min_col = current_col
# Move on to the next column
current_col = current_col.right
return min_col | [
"def smallest_column_summand(self):\n def summands_domains_sizes(col):\n return prod( [len(x.domain) for x in col[:-1]] )\n\n col_uninstans = [(self.cols[indx], summands_domains_sizes(self.cols[indx]), indx)\n for indx in range(len(self.cols)) if summands_domains_sizes(self.cols[indx]) > 1 and\n any(not d.was_propagated for d in self.cols[indx][1:-1])]\n (min_col, _, _) = min(col_uninstans, key=lambda cu: (cu[1], cu[2]))\n # col[1:-1]gets the middle elements\n smallest_var = min(min_col[1:-1], key=lambda x: len(x.domain) if len(x.domain) > 1 else float('inf'))\n return smallest_var",
"def __find_smallest(self):\r\n minval = sys.maxint\r\n for i in range(self.n):\r\n for j in range(self.n):\r\n if (not self.row_covered[i]) and (not self.col_covered[j]):\r\n if minval > self.C[i][j]:\r\n minval = self.C[i][j]\r\n return minval",
"def shortest_column(self):\n return self.column_bottoms.index(min(self.column_bottoms))",
"def getCellOfMiniPos(self):\n cellList =[]\n # [cells for cells in self.board if cells.value is 0]\n for row in self.board :\n for item in row :\n if item.value is 0 :\n cellList.append(item)\n cellPossible = list(map(lambda x: len(list(set(x.possibleVal)-set([item.value for item in x.conflictSet]))),cellList))\n index = np.argmin(cellPossible)\n return cellList[index].index",
"def find_smallest(self):\n # add max value to covered rows and columns to ignore the covered cells\n maxval = self.C.max()\n C = self.C + self.row_cover[:, np.newaxis]*maxval\n C += self.col_cover*maxval\n # return the smallest value\n return C.min()",
"def get_column_min(col_name, data):\n\n col_min = sys.float_info.max\n for row in data:\n if float(row[col_name]) <= col_min:\n col_min = float(row[col_name])\n\n return col_min",
"def find_first_free_cell(board, picked_column):\n for row in reversed(range(len(board))):\n if board[row][picked_column] == 0:\n return row",
"def min(self, col):\n return self._min(col)[0]",
"def get_smallest_h_cost_unvisited_node(self):\n node_list = []\n for column in self.grid:\n for node in column:\n if node.pos in self.unvisited_pos:\n node_list.append(node)\n return min(node_list, key=lambda x: x.h_cost)",
"def get_most_constrainted_cell(self):\n most_constrainted_cell_length = 9\n constrainted_cell_row = 0\n constrainted_cell_column = 0\n for row in range(9):\n for column in range(9):\n if not self.is_filled(row, column):\n if len(self.board[row][column]) < most_constrainted_cell_length:\n most_constrainted_cell_length = len(self.board[row][column])\n constrainted_cell_row = row\n constrainted_cell_column = column\n return (constrainted_cell_row, constrainted_cell_column)",
"def min_idx(self, col):\n return self._min(col)[1]",
"def minimum_f_cell(self):\n return sorted(self.open_cells,key = lambda cell: cell.f)[0]",
"def findOptimalColHeight(mine, state):\n bestCols = np.zeros((mine.len_x, mine.len_y)) #Initialize outptu array with zeros, to same size as an x*y array.\n\n #Get values of cells which have not been dug.\n statedUnderground = mine.underground * abs(state-1)\n\n #loop through every x and y coordinate\n for x in range(mine.len_x):\n for y in range(mine.len_y):\n\n #For each z value in a coloum, try summing from depth 0 to z, best found sum is stored.\n for z in range(1, mine.len_z+1):\n tempSum = np.sum(statedUnderground[(x, y)][0:z])\n if tempSum > bestCols[(x,y)]:\n bestCols[(x,y)] = tempSum\n #end\n #end\n #end\n #end\n\n return bestCols",
"def expand_by_minors_on_column(self, col):\n\t\tassert(col < self.cols())\n\t\td = 0\n\t\tfor row in xrange(self.rows()):\n\t\t\t# Note: the () around -1 are needed. Otherwise you get -(1**col).\n\t\t\td += (-1)**(row+col) \\\n\t\t\t\t*self[(row, col)]*self.minor(row, col).determinant()\n\t\treturn d",
"def get_next_cell(self, sympy_mat):\n max_constraint, max_cell = -1, -1\n if self.selection:\n for i in range(sympy_mat.cols - 1):\n if sum(sympy_mat.col(i)) > max_constraint:\n max_constraint = sum(sympy_mat.col(i))\n max_cell = i\n\n return (-1, -1) if max_cell == -1 else self.index_to_cell(self.dense_to_sparse[max_cell])",
"def rf_tile_min(tile_col):\n return _apply_column_function('rf_tile_min', tile_col)",
"def minCost(costMat, m, n):\n dp = [[float('inf') for i in range(len(costMat))] for j in range(len(costMat[0]))]\n dp[0][0] = costMat[0][0]\n \n def helper(costMat, m, n):\n if dp[m][n]!=float('inf'):\n return dp[m][n]\n \n if n<0 or m<0:\n return float('inf')\n \n dp[m][n] = costMat[m][n] + min(helper(costMat, m-1, n-1), helper(costMat, m, n-1), helper(costMat, m-1, n))\n return dp[m][n]\n \n return helper(costMat, m, n), dp",
"def find_smallest(num_vars):\n for x in range(10):\n if num_vars <= 2**x:\n return x",
"def findLowestAvailableBox(gameBoard):\n\tlowest_in_col = defaultdict(int)\n\tfor box in gameBoard.box:\n\t\tif box.points == 0: #only care about boxes that are still available to split\n\t\t\trow = math.floor(box.y/2)\n\t\t\tcol = math.floor(box.x/2)\n\t\t\tif row > lowest_in_col[col]:\n\t\t\t\tlowest_in_col[col] = row\n\n\treturn lowest_in_col"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
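For context, `get_minimum_column` above is the usual column-selection heuristic seen in dancing-links / Algorithm X style solvers: walk the circular header list once and keep the column whose cell count is smallest. Below is a minimal, self-contained sketch of that walk over a toy header list; the `Column` class and `link` helper are hypothetical stand-ins for the real node structures the method assumes.

```python
class Column:
    def __init__(self, name, total):
        self.name = name
        self.sum = total      # number of cells currently in this column
        self.right = None     # next column header in the circular list

def link(root, columns):
    """Link the root and column headers into one circular list."""
    nodes = [root] + columns
    for a, b in zip(nodes, nodes[1:] + [root]):
        a.right = b

def get_minimum_column(root):
    min_col = root.right
    current_col = min_col.right
    while current_col is not root:
        if current_col.sum < min_col.sum:
            min_col = current_col
        current_col = current_col.right
    return min_col

root = Column("root", float("inf"))
link(root, [Column("A", 3), Column("B", 0), Column("C", 2)])
print(get_minimum_column(root).name)  # -> "B" (a 0-cell column is allowed)
```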
This method swaps out the numpy instance in the module, should it have one, for the one in the fake instance we have here. | def _swap_numpy(self, module):
# Check to make sure this is not one of the string options from the YAML
if not isinstance(module, str):
if hasattr(module, 'numpy'): # Check if it has a self.numpy object
# TODO: Replace this with the correct variable
module.numpy = self.fake.numpy # Swap out with the class's instance of numpy
return module # Return out the mutated module | [
"def test_inplace_set_value(self):\r\n dtype = self.dtype\r\n if dtype is None:\r\n dtype = theano.config.floatX\r\n\r\n shp = (100/4,1024)#100KB\r\n\r\n x = numpy.zeros(shp, dtype=dtype)\r\n x = self.cast_value(x)\r\n x_shared = self.shared_constructor(x, borrow=True)\r\n\r\n old_data = x_shared.container.storage[0]\r\n nd = numpy.ones(shp, dtype=dtype)\r\n\r\n if x.__class__.__name__ != 'csr_matrix':\r\n #sparse matrix don't support inplace affectation\r\n x_shared.container.value[:] = nd\r\n assert (numpy.asarray(x_shared.get_value(borrow=True))==nd).all()\r\n #This should always share value!\r\n assert may_share_memory(old_data, x_shared.container.storage[0])\r\n assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))\r\n\r\n nd[0]+=1\r\n x_shared.container.value[0] = nd[0]\r\n assert (numpy.asarray(x_shared.get_value(borrow=True)[0])==nd[0]).all()\r\n assert (numpy.asarray(x_shared.get_value(borrow=True)[1:])==nd[1:]).all()\r\n #This should always share value!\r\n assert may_share_memory(old_data, x_shared.container.storage[0])\r\n assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))\r\n\r\n if x.__class__.__name__ != 'csr_matrix':\r\n #sparse matrix don't support inplace affectation\r\n nd += 1\r\n #THIS DON't DO WHAT WE EXPECT the contain of a is not updated for CudaNdarray, but it is for ndarray\r\n x_shared.get_value(borrow=True)[:] = nd\r\n #assert (numpy.asarray(x_shared.get_value(borrow=True))!=nd).all()\r\n assert may_share_memory(old_data, x_shared.container.storage[0])\r\n x_shared.get_value(borrow=True)\r\n\r\n # Test by set_value with borrow=False\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(nd, borrow=False)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),\r\n self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace\r\n\r\n # Test by set_value with borrow=False when new data cast.\r\n # specificaly useful for gpu data\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(self.cast_value(nd), borrow=False)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),\r\n self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_cast_value_inplace\r\n\r\n # Test by set_value with borrow=True\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(nd.copy(), borrow=True)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),\r\n self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace\r\n\r\n # Test by set_value with borrow=True when new data cast.\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(self.cast_value(nd.copy()), borrow=True)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)), self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_cast_value_inplace",
"def __resetTestArray(self):\n\tself.__resetArray(self.__TEST)",
"def reset(self):\n self.array = self.original\n self.original = list(self.original)\n return self.array",
"def test_Numpy_import(benchmark):\n\n def Benchmark():\n import numpy as np\n a = np.ndarray(1)\n del a\n\n benchmark(Benchmark)",
"def set_value(self, new_value, borrow=False):\n new_value = np.array(new_value, copy = not borrow)\n try:\n if self.shape != new_value.shape:\n self.resize(new_value.shape, refcheck=False)\n # refcheck is necessary to get this to work, but bypasses\n # the reference checks. Reference errors might occur if\n # a reference to this ShimmedTensorShared variable exists elsewhere,\n # and we try to access it after the resize. This is the kind\n # of thing you shouldn't do anyway with Theano variables.\n self[:] = new_value\n except IndexError:\n # Scalars will fail on the above\n assert(isscalar(new_value))\n # np.isscalar will fail on 0-dim arrays; isscalar works\n self = super(ShimmedTensorShared, self).__setitem__(None, new_value)",
"def test_ndarray_copy(self):\r\n assert copy(numpy.ndarray) is numpy.ndarray\r\n assert deepcopy(numpy.ndarray) is numpy.ndarray",
"def numpy(self):\n for key, value in self.__dict__.items():\n self.__dict__[key] = value.numpy()\n return self",
"def test_numpy_arrays_not_copied(self):\n with PhysicsEngineHarness('tests/engineering-test.json') as physics_engine:\n state = physics_engine.get_state()\n\n engineering = state.engineering\n engineering.components[0].temperature = 777777.7\n self.assertEqual(engineering._array[2 * N_COMPONENTS], 777777.7)\n self.assertEqual(state.y0()[state.ENGINEERING_START_INDEX + 2 * N_COMPONENTS], 777777.7)",
"def test_np_array_creation(self):\n self.assertTrue(np.array_equal(recursive_make_object(self.np_dict, self.class_dictionary), self.np_array))",
"def test_changing_copy(self):\n self.copy.arr[0] = 55.0\n assert not list_ordered_equal(self.copy, self.orig), 'Altering one vector should not affected the other'",
"def stub_out(self, old, new):\n self.useFixture(fixtures.MonkeyPatch(old, new))",
"def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument\n if out_arr.shape != (3,):\n out_arr = out_arr.view(np.ndarray)\n return out_arr",
"def unify(self, typingctx, other):\n # If other is array and the ndim matches\n if isinstance(other, Array) and other.ndim == self.ndim:\n # If dtype matches or other.dtype is undefined (inferred)\n if other.dtype == self.dtype or not other.dtype.is_precise():\n if self.layout == other.layout:\n layout = self.layout\n else:\n layout = 'A'\n readonly = not (self.mutable and other.mutable)\n aligned = self.aligned and other.aligned\n return Array(dtype=self.dtype, ndim=self.ndim, layout=layout,\n readonly=readonly, aligned=aligned)",
"def test_convert_array(self):\r\n data = np.array([1, 2, 3])\r\n res = data.numpy()\r\n\r\n assert np.shares_memory(res, data)\r\n assert np.all(res == data)\r\n assert isinstance(res, np.ndarray)\r\n assert not isinstance(res, np.tensor)",
"def reset(self):\n self.array = self.origin\n return self.array",
"def __array__(self):\n return self.state(numpy=True)",
"def intern(self,x, newshape=None):\n#\t\tprint 'intern ',repr(x)\n\t\tif not hasattr(x, 'shape'):\n\t\t\tx = np.array(x,dtype=self.dtype)\n\t\t\t\n\t\tif newshape is not None and x.shape!=newshape and x.shape!=():\n\t\t\tx=x.reshape(newshape)\n#\t\tprint 'intern ',type(x),repr(x)\n\t\tif not isinstance(x, cl.array.Array):\n\t\t\tif x.shape == (): # x is a scalar?\n\t\t\t\tx=cl.array.to_device(self.queue, np.array([x],dtype=self.dtype),allocator=self.allocator)\n\t\t\telse:\n\t\t\t\tx=cl.array.to_device(self.queue, x.astype(self.dtype),allocator=self.allocator)\n#\t\tprint 'intern return x=',type(x),repr(x)\n\t\treturn x",
"def overwrite(self, array):\n assert type(array) == np.ndarray, 'array must be a np.ndarray.'\n array = reformat(array)\n self.numpy_array = array\n _eigenmat.init_from_array(self.p_mat, array.ctypes.data_as(ct.POINTER(ct.c_float)), ct.c_int(array.shape[0]), ct.c_int(array.shape[1]))",
"def post_process(self, numpy_dict):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
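A minimal sketch of what the `_swap_numpy` helper above does, using `types.SimpleNamespace` objects as hypothetical stand-ins for a provider module and for the fake's controlled numpy instance: any non-string module exposing a `numpy` attribute has it replaced, and everything else passes through untouched.

```python
import types

class FakeNumpy:
    """Hypothetical stand-in for a seeded/controlled numpy replacement."""
    def __repr__(self):
        return "<FakeNumpy>"

class Injector:
    def __init__(self):
        # Mirrors `self.fake.numpy` from the document above.
        self.fake = types.SimpleNamespace(numpy=FakeNumpy())

    def _swap_numpy(self, module):
        if not isinstance(module, str):
            if hasattr(module, "numpy"):
                module.numpy = self.fake.numpy  # swap in the controlled instance
        return module

provider = types.SimpleNamespace(numpy="<real numpy module>")
injector = Injector()
print(injector._swap_numpy(provider).numpy)        # -> <FakeNumpy>
print(injector._swap_numpy("yaml_string_option"))  # strings pass through unchanged
```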
This method injects the providers into the faker instance. | def add_providers(self):
str_providers = PROVIDERS[0] # Providers, called by name
live_providers = PROVIDERS[1] # Providers, provided as a live module
for providers in PROVIDERS: # Iterate over the types of providers
for provider in providers: # Iterate over all the methods
# Inject those into faker, and swap the numpy instance
self.fake.add_faker(self._swap_numpy(provider[0]), provider[1]) | [
"def providers(self):\n return [self.fake_provider]",
"def setup_provider(self):\n pass",
"def faker() -> Faker:\n\n return Faker()",
"def fake_init():\n return Faker()",
"def create_providers(cls) -> Iterable['BaseProvider']:\n return []",
"def test_faker_customization(self):\n Trip = collections.namedtuple('Trip', ['departure', 'transfer', 'arrival'])\n\n may_4th = datetime.date(1977, 5, 4)\n may_25th = datetime.date(1977, 5, 25)\n october_19th = datetime.date(1977, 10, 19)\n\n class TripFactory(factory.Factory):\n class Meta:\n model = Trip\n\n departure = may_4th\n arrival = may_25th\n transfer = factory.Faker(\n 'date_between_dates',\n start_date=factory.SelfAttribute('..departure'),\n end_date=factory.SelfAttribute('..arrival'),\n )\n\n def fake_select_date(start_date, end_date):\n \"\"\"Fake date_between_dates.\"\"\"\n # Ensure that dates have been transferred from the factory\n # to Faker parameters.\n self.assertEqual(start_date, may_4th)\n self.assertEqual(end_date, may_25th)\n return october_19th\n\n self._setup_advanced_mock_faker(\n date_between_dates=fake_select_date,\n )\n\n trip = TripFactory()\n self.assertEqual(may_4th, trip.departure)\n self.assertEqual(october_19th, trip.transfer)\n self.assertEqual(may_25th, trip.arrival)",
"def _load_providers(self, **kwargs):\n return super()._load_providers(providers=\"TIProviders\", **kwargs)",
"def initiate_providers(self_name):\n if self_name not in providers_config:\n return\n\n for provider_name in providers_config[self_name]:\n oauth.register(name=provider_name, **providers_config[self_name][provider_name])",
"def _fixture_setup(self):\n pass",
"def make_factories(self, classes):",
"def setUp(self):\n patientgen = PatientsGenerator(0, 1, 0, 'a')\n self.record = patientgen.data.find('record')\n self.gender_sex = patientgen.gender_sex_list\n self.ethnicities = patientgen.ethnicity_list\n # self.female_names = patientgen.data_generator.first_names_female\n # self.male_names = patientgen.data_generator.first_names_male\n # self.last_names = patientgen.data_generator.last_names",
"def register(self, provider):\n for entry in dir(provider):\n try:\n provider_function = type(provider).__dict__[entry]\n factory_provider = getattr(provider_function, 'factory_provider', None)\n if factory_provider:\n provided_type, singleton = factory_provider\n if callable(provider_function): # A function or member function\n # if it's a bound method, this will get the bound version\n provider_member = getattr(provider, entry)\n self.add_factory(provided_type, provider_member, singleton)\n elif hasattr(provider_function, '__get__'):\n # this is a property or non-callable descriptor:\n self.add_factory(\n provided_type,\n functools.partial(provider_function.__get__, provider, provider),\n singleton,\n )\n else:\n self.add_service(provided_type, provider_function)\n except KeyError:\n pass",
"def _get_faker(field_metadata):\n pii_locales = field_metadata.get('pii_locales', None)\n return Faker(locale=pii_locales)",
"def load_providers():\n providers = app_settings.DONATION_PROVIDERS\n logger.debug('Loading Donation Providers')\n from donations.models import DonationProvider\n for name, klass in providers.items():\n try:\n provider, created = DonationProvider.objects.get_or_create(name=name, klass=klass)\n status = \"New\" if created else \"Existing\"\n logger.debug(\"%s provider %s loaded\", status, provider)\n except Exception as exc:\n logger.warning(\"Could not load the DonationProvider model instance due to %s\", exc)",
"def run_providers(self, argv):\n\n for name, provider in self.providermanager:\n provider = provider(self)\n self.produce_output(provider.title,\n provider.location,\n provider.run(argv))",
"def _build_observation_providers(self) -> Dict[str, ObservationProvider]:\n pass",
"def _register_providers(self, config_provider: _ConfigurationProvider) -> None:\n super()._register_providers(config_provider)\n config_provider.register_provider(\n _CloudConfigurationProvider(self._ge_cloud_config)\n )",
"def initialize_client():\n logging.info('Initializing Sendgrid provider')\n sendgrid_authentication, sendgrid_username = get_provider_credentials('sendgrid') \n sendgrid_provider = SendGridProvider(sendgrid_authentication, sendgrid_username)\n\n logging.info('Initializing Mailgun provider')\n mailgun_authentication, mailgun_domain = get_provider_credentials('mailgun')\n mailgun_provider = MailGunProvider(mailgun_authentication, mailgun_domain)\n\n logging.info('Registering providers')\n client.register_provider(sendgrid_provider, 10)\n client.register_provider(mailgun_provider, 20)",
"def test_multi_registered_provider(self):\n config_provider.register_provider(DummyProvider)\n self.assertRaises(KeyError, config_provider.register_provider,\n Dummy2Provider)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
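The loop above walks every provider group in `PROVIDERS` and registers each `(provider, name)` pair on the faker instance after routing it through `_swap_numpy` (note that the `str_providers` and `live_providers` locals are never used, since the loop iterates over all of `PROVIDERS` anyway). A rough, self-contained sketch of that registration flow follows; the `FakeFaker` recorder and the assumed shape of `PROVIDERS` are illustrative only.

```python
class FakeFaker:
    """Hypothetical recorder standing in for the real faker wrapper."""
    def __init__(self):
        self.registered = {}

    def add_faker(self, provider, name):
        self.registered[name] = provider

# Assumed shape: a tuple of groups, each holding (provider, name) pairs.
PROVIDERS = (
    [("faker.providers.person", "person")],     # providers referenced by name/string
    [(object(), "numpy_backed_distribution")],  # providers supplied as live objects
)

fake = FakeFaker()
for providers in PROVIDERS:           # iterate over the types of providers
    for provider, name in providers:  # iterate over all the (provider, name) pairs
        fake.add_faker(provider, name)

print(sorted(fake.registered))  # -> ['numpy_backed_distribution', 'person']
```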
Create a map of duplicate counts and cumulative probabilities according to a pdf (i.e. uniform) and store it for reuse on each original event. The current version is taken directly from FEBRL and needs review (does the number of duplicates stored start at 2?). | def generate_duplicate_pdf(self):
num_dup = 1
prob_sum = 0.0
prob_list = [(num_dup, prob_sum)]
max_dups = self.duplicate_cfg["Max_duplicate"]
uniform_val = 1.0 / float(max_dups)
for i in range(max_dups - 1):
num_dup += 1
prob_list.append((num_dup, uniform_val + prob_list[-1][1]))
return prob_list | [
"def field_pdf(self):\n num_dup = 1\n prob_sum = 0.0\n prob_list = [(num_dup, prob_sum)]\n max_dups = self.duplicate_cfg[\"Max_duplicate\"]\n uniform_val = 1.0 / float(max_dups)\n self.__logger.debug(\"Maximum number of duplicatesi %d\", max_dups)\n for i in range(max_dups - 1):\n num_dup += 1\n prob_list.append((num_dup, uniform_val + prob_list[-1][1]))\n return prob_list",
"def get_probabilities(counts, total):\n result = {}\n for ngram, count in counts.items():\n result[ngram] = math.log(count / total)\n return result",
"def prob_dist(line1, line2, line3, line4, model):\n vocab = set(counts_un.keys())\n probs = dict()\n for line5 in vocab:\n probs[line5] = model.get_pentagram_prob(line1, line2, line3, line4, line5)\n return probs",
"def __get_prob_dist(self):\n prob_dist = {}\n\n total_word_count = sum([self._occurence_counter[word]**self.neg_sampling for word in self._occurence_counter])\n for word in self._occurence_counter:\n prob_dist[word] = (self._occurence_counter[word]**self.neg_sampling)/total_word_count\n\n if self.verbose: print(\"\\nProbability distribution created.\")\n # if self.debug: print(\"Probability distribution: \"+str(prob_dist))\n return prob_dist",
"def prob_dist(line1, line2, model):\n vocab = set(counts_un.keys())\n probs = dict()\n for line3 in vocab:\n probs[line3] = model.get_trigram_prob(line1, line2, line3)\n return probs",
"def calc_prob(data):\n total = len(data)\n frequencies = sorted(Counter(data).items())\n probabilities = OrderedDict()\n for (key, value) in frequencies:\n probabilities[key] = value / total\n return probabilities",
"def pmf_dict(colors, nsample):\n _validate_params(colors, nsample)\n with mp.extradps(5):\n total = sum(colors)\n logdenom = logbinomial(total, nsample)\n p = {}\n for coords in support(colors, nsample):\n lognumer = 0\n for color, k in zip(colors, coords):\n lognumer += logbinomial(color, k)\n logprob = lognumer - logdenom\n p[tuple(coords)] = mp.exp(logprob)\n return p",
"def compute_probabilities(self):\n\t\tself.log_prior = np.log(self.documents / self.total_documents)\n\t\tfor label, counts in enumerate(self.counts):\n\t\t\tself.log_likelihood[label] = {k: np.log(v / self.total_counts[label]) for k, v in self.counts[label].items()}",
"def _compute_register_probs(cls, num_values, probability):\n bits = np.arange(1, num_values + 1)\n probs = scipy.stats.geom.pmf(bits, probability)\n\n return probs / sum(probs)",
"def probability(prods, prod_dict_As, count_dict):\n for p in prods:\n if p not in prod_dict_As:\n raise Exception(\"Think we cannot make the product {}.\".format(p))\n # Argh, Python, this is a reference!\n #possible_As = prod_dict_As[prods[0]]\n possible_As = set( prod_dict_As[prods[0]] )\n for p in prods[1:]:\n possible_As &= prod_dict_As[p]\n ret = []\n for A in possible_As:\n count = 1\n for p in prods:\n count *= count_dict[(p,A)]\n ret.append((A,count))\n return ret",
"def createProbabilityDict(markDict, n):\n length = len(markDict.keys()) \n # create sorted dict - rank as key, id as value \n sorted_list = sorted(markDict.items(), key=lambda x: x[1]) \n sorted_dict = {} \n\n for counter, value in enumerate(sorted_list): \n sorted_dict[counter + 1] = value[0] \n\n if n > 2 or n < 1: \n return -1 \n \n n_min = 2 - n \n ranked_dict = {} \n sum = 0 \n for key in sorted_dict: \n ranked_dict[sorted_dict[key]] = 1/length*(n-(n_min - n)*(key - 1)/(length - 1)) \n sum += ranked_dict[sorted_dict[key]] \n \n for key in ranked_dict: \n ranked_dict[key] = ranked_dict[key]/sum \n \n print(ranked_dict) \n return ranked_dict",
"def generate_pdf_training_data(cls):\n sz = cls.test_set_size\n _x = np.zeros((sz, cls.state_size))\n _y = np.zeros((sz, cls.action_size))\n u = dict()\n u[str(_x[0])] = True\n for _i in range(0, sz):\n _pdf = np.random.randint(100, size=cls.action_size)\n _pdf = _pdf / np.sum(_pdf)\n _x[_i] = np.random.randint(3, size=cls.action_size)\n while str(_x[_i]) in u:\n _x[_i] = np.random.randint(3, size=cls.action_size)\n u[str(_x[_i])] = True\n _y[_i] = _pdf\n return _x, _y",
"def get_ngramlogprobs(freqdict):\n return",
"def calc_probs(d):\n dict_probs_d = {}\n d_set = set(d)\n for word in d_set:\n dict_probs_d[word] = d.count(word) / len(d)\n return dict_probs_d",
"def logpmf_dict(colors, nsample):\n _validate_params(colors, nsample)\n with mp.extradps(5):\n total = sum(colors)\n logdenom = logbinomial(total, nsample)\n logpmf = {}\n for coords in support(colors, nsample):\n lognumer = 0\n for color, k in zip(colors, coords):\n lognumer += logbinomial(color, k)\n logprob = lognumer - logdenom\n logpmf[tuple(coords)] = logprob\n return logpmf",
"def renormalize(pmf):\n # determine how much to muliply each entry's probablity by to re-normalize\n total = sum(pmf.values())\n mult = 1 / total\n return dict([(key, prob * mult) for key, prob in pmf.items()])",
"def sample_pagerank(corpus, damping_factor, n):\n probabilities = dict()\n samples = []\n\n # Random first sample\n page = random.choice(list(corpus.keys()))\n samples.append(page)\n \n # Remaining samples after first\n for i in range(n-1):\n p = transition_model(corpus, page, damping_factor)\n page = random.choices(list(p.keys()), weights=list(p.values()), k=1)[0]\n samples.append(page)\n\n # Count\n for p in corpus.keys():\n probabilities[p] = samples.count(p) / n\n\n return probabilities",
"def calcProbability(self):\n for attribute in self.attributes:\n index = self.F2I[attribute]\n features = set([self.train[i][0][index] for i in range(len(self.train))])\n for feature in features:\n #all the true and false\n result_t = list(filter(lambda x: x[1]== True, self.train))\n total_t = len(result_t)\n result_f = list(filter(lambda x: x[1]== False, self.train))\n total_f= len(result_f)\n #the probability for the feature if its true or false\n t = len(list(filter(lambda x: x[0][index] == feature, result_t)))\n f = len(list(filter(lambda x: x[0][index] == feature, result_f)))\n prob_yes= t/total_t\n prob_no = f/total_f\n #assign the probabilities to the dictionaries\n self.probs_yes[(index,feature)] = prob_yes\n self.probs_no[(index,feature)] = prob_no",
"def _calculate_measurement_probs(measurements):\n total_mes = len(measurements)\n unique_mes = [list(x) for x in {tuple(x) for x in measurements}]\n total_unique_mes = len(unique_mes)\n len_qubits = len(unique_mes[0])\n measurements_probabilities = {}\n for i in range(total_unique_mes):\n strqubits = ''\n for qubit_idx in range(len_qubits):\n strqubits += str(unique_mes[i][qubit_idx])\n prob = measurements.count(unique_mes[i]) / total_mes\n measurements_probabilities[strqubits] = prob\n\n return measurements_probabilities"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
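To make the cumulative list above concrete, here is a worked sketch for `Max_duplicate = 4`, with the configuration inlined, plus one way such a cumulative pdf is typically consumed when drawing a duplicate count. The selection step at the end is an illustration, not code taken from this record.

```python
import random

def generate_duplicate_pdf(max_dups):
    # Same construction as the document above, config inlined.
    num_dup = 1
    prob_list = [(num_dup, 0.0)]
    uniform_val = 1.0 / float(max_dups)
    for _ in range(max_dups - 1):
        num_dup += 1
        prob_list.append((num_dup, uniform_val + prob_list[-1][1]))
    return prob_list

pdf = generate_duplicate_pdf(4)
print(pdf)  # -> [(1, 0.0), (2, 0.25), (3, 0.5), (4, 0.75)]

# Drawing a count: pick the largest entry whose cumulative lower bound
# is <= a uniform draw in [0, 1).
draw = random.random()
ndups = max(n for n, lower in pdf if lower <= draw)
print(f"draw={draw:.2f} -> {ndups} duplicate(s)")
```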
Determines whether the original record will be duplicated, and gets the maximum number of duplicated records to generate. | def expect_duplicate(self):
# Reset everything for this record
self._expect_duplicate = False
self.__dupcntr = 0
self.__maxdup = 0
# Get the probability to generate duplicate for next record
if self.fake.random.random() < self.duplicate_cfg["Prob_duplicate"]:
self._expect_duplicate = True
self.__maxdup = self.random_select_ndups()
else:
self._expect_duplicate = False
self.__maxdup = 0
self.__logger.debug("expect_duplicate ndups: %d", self.__maxdup) | [
"def is_duplicate(self):\n return bool(self.duplicated)",
"def is_duplicated_data_page(self, new_record_set):\n print(\"def is_duplicated_data_page \" + TimeStamp.timestamp()) #Elina 08-12-2020\n previous_page_links = [old_record for old_record in self.previous_page_of_records]\n\n def is_duplicate_record(new_record):\n return True if new_record in previous_page_links else False\n\n all_is_duplicates = [is_duplicate_record(a_record) for a_record in new_record_set]\n return True if all(all_is_duplicates) else False",
"def test_AutoIDOnDuplicateID(self):\n expSize = 2\n autoID = True\n coll = ERP.RecordCollection()\n theID = \"W222\"\n coll.addRecord(theID, \"F\", None, None, None, None, autoID, None)\n \"\"\"Prove expSize is not achieved yet\"\"\"\n self.assertNotEqual(expSize, len(coll.getAllRecords()))\n coll.addRecord(theID, \"F\", None, None, None, None, autoID, None)\n self.assertEqual(expSize, len(coll.getAllRecords()))",
"def duplicate(self):\n return self._duplicated",
"def isRepeated(self):\n return self._field.label == FieldDescriptor.LABEL_REPEATED",
"def include_duplicated_records(self):\n self._include_duplicated_records = True\n return self",
"def test_OverwriteOnDuplicateID(self):\n expSize = 1\n overwrite = True\n coll = ERP.RecordCollection()\n theID = \"F333\"\n coll.addRecord(theID, \"M\", None, None, None, None, None, overwrite)\n self.assertEqual(expSize, len(coll.getAllRecords()))\n originalRec = coll.getAllRecords()[0]\n coll.addRecord(theID, \"M\", None, None, None, None, None, overwrite)\n \"\"\"Prove expSize remains the same\"\"\"\n self.assertEqual(expSize, len(coll.getAllRecords()))\n newRec = coll.getAllRecords()[0]\n \"\"\"Prove that objects are different\"\"\"\n self.assertNotEqual(originalRec, newRec)\n \"\"\"Prove that they have the same ID\"\"\"\n self.assertEqual(originalRec.getID(), newRec.getID())",
"def check_duplicates():\n print \"Building data set...\\n\"\n\n rows = db.links.find()\n seen = set()\n count = 0\n for row in rows:\n value = hashlib.md5(row['body'].encode('utf8')).hexdigest()\n if value in seen:\n count += 1\n print row['category'], row['_id']\n # db.links.remove({'_id': row['_id']})\n else:\n seen.add(value)\n print count, 'duplicate(s)'\n print \"-------------------\\n\"",
"def process_duplicate_rows(self):\n pass",
"def is_repetition(self):\n return self.id == 1",
"def checkUniqueID(records):\n seqIDs = [records[x].id for x in range(len(records))]\n IDcounts = Counter(seqIDs)\n duplicates = [k for k, v in IDcounts.items() if v > 1]\n if duplicates:\n print(\"Input sequence IDs not unique. Quiting.\")\n print(duplicates)\n sys.exit(1)\n else:\n pass",
"def Get_dup_records(ds,key_var):\n temp = ds.groupby([key_var]).agg({key_var:'count'}).rename(columns={key_var:'Freq'}).reset_index()\n temp = temp[temp['Freq']>1]\n print(\"Total Duplicate records:: \" +str(temp.shape[0]))\n\n return temp",
"def corrupt_records(self, rec_dict):\n\n # Check if number of records given is what is expected\n #\n assert self.number_of_org_records == len(rec_dict), \\\n 'Illegal number of records to modify given'\n\n # First generate for each original record the number of duplicates that are\n # to be generated for it.\n #\n dup_rec_num_dict = {} # Keys are the record identifiers of the original\n # records, value their number of duplicates\n total_num_dups = 0 # Total number of duplicates generated\n\n org_rec_id_list = list(rec_dict) # keys()\n random.shuffle(org_rec_id_list)\n\n org_rec_i = 0 # Loop counter over which record to assign duplicates to\n\n while ((org_rec_i < self.number_of_org_records) and\n (total_num_dups < self.number_of_mod_records)):\n\n # Randomly choose how many duplicates to create for this original record\n #\n r = random.random() # Random number between 0.0 and 1.0\n ind = -1\n while (self.prob_dist_list[ind][1] > r):\n ind -= 1\n num_dups = self.prob_dist_list[ind][0]\n\n assert (num_dups > 0) and (num_dups <= self.max_num_dup_per_rec)\n\n # Check if there are still 'enough' duplicates to generate\n #\n if (num_dups <= (self.number_of_mod_records-total_num_dups)):\n\n # Select next record for which to generate duplicates\n #\n org_rec_id = org_rec_id_list[org_rec_i]\n org_rec_i += 1\n dup_rec_num_dict[org_rec_id] = num_dups\n total_num_dups += num_dups\n\n assert total_num_dups == sum(dup_rec_num_dict.values())\n\n # Deal with the case where every original record has a number of duplicates\n # but not enough duplicates are generated in total\n #\n org_rec_id_list = list(rec_dict) # .keys()\n random.shuffle(org_rec_id_list)\n\n while (total_num_dups < self.number_of_mod_records):\n org_rec_id = random.choice(org_rec_id_list)\n\n # If possible, increase number of duplicates for this record by 1\n #\n if (dup_rec_num_dict[org_rec_id] < self.max_num_dup_per_rec):\n dup_rec_num_dict[org_rec_id] = dup_rec_num_dict[org_rec_id]+1\n total_num_dups += 1\n\n assert sum(dup_rec_num_dict.values()) == self.number_of_mod_records\n\n # Generate a histogram of number of duplicates per record\n #\n\n f = open(\"output_log_file.txt\", \"a\")\n\n dup_histo = {}\n\n for (org_rec_id_to_mod, num_dups) in dup_rec_num_dict.items():\n dup_count = dup_histo.get(num_dups, 0) + 1\n dup_histo[num_dups] = dup_count\n f.write('Distribution of number of original records with certain number of duplicates: \\n')\n dup_histo_keys = dup_histo.keys()\n sorted(dup_histo_keys) # .sort()\n for num_dups in dup_histo_keys:\n f.write(' Number of records with %d duplicates: %d \\n' %\n (num_dups, dup_histo[num_dups]))\n f.write(\"\\n\")\n\n num_dup_rec_created = 0 # Count how many duplicate records have been\n # generated\n\n # Main loop over all original records for which to generate duplicates - -\n #\n for (org_rec_id_to_mod, num_dups) in dup_rec_num_dict.items():\n assert (num_dups > 0) and (num_dups <= self.max_num_dup_per_rec)\n\n f.write(\"\\n\")\n f.write('Generating %d modified (duplicate) records for record \"%s\" \\n' %\n (num_dups, org_rec_id_to_mod))\n\n print('Generating %d modified (duplicate) records for record \"%s\" \\n' %\n (num_dups, org_rec_id_to_mod))\n\n rec_to_mod_list = rec_dict[org_rec_id_to_mod]\n\n d = 0 # Loop counter for duplicates for this record\n\n this_dup_rec_list = [] # A list of all duplicates for this record\n\n # Loop to create duplicate records - - - - - - - - - - - - - - - - - - - -\n #\n while (d < num_dups):\n\n # Create a duplicate of the original record\n #\n # Make copy 
of original record\n dup_rec_list = rec_to_mod_list[:]\n\n org_rec_num = org_rec_id_to_mod.split('-')[1]\n dup_rec_id = 'rec-%s-bbb-%d' % (org_rec_num, d)\n f.write(' Generate identifier for duplicate record based on \"%s\": %s \\n'\n % (org_rec_id_to_mod, dup_rec_id))\n\n # Count the number of modifications in this record (counted as the\n # number of modified attributes)\n #\n num_mod_in_record = 0\n\n # Set the attribute modification counters to zero for all attributes\n # that can be modified\n #\n attr_mod_count_dict = {}\n for attr_name in self.attr_mod_prob_dict.keys():\n attr_mod_count_dict[attr_name] = 0\n\n # Abort generating modifications after a larger number of tries to\n # prevent an endless loop\n #\n max_num_tries = self.num_mod_per_rec*10\n num_tries = 0\n\n # Now apply desired number of modifications to this record\n #\n while ((num_mod_in_record < self.num_mod_per_rec) and\n (num_tries < max_num_tries)):\n\n # Randomly modify an attribute value\n #\n r = random.random() # Random value between 0.0 and 1.0\n i = 0\n while (self.attr_mod_prob_list[i][0] < r):\n i += 1\n mod_attr_name = self.attr_mod_prob_list[i][1]\n\n if (attr_mod_count_dict[mod_attr_name] < self.max_num_mod_per_attr):\n mod_attr_name_index = self.attribute_name_list.index(\n mod_attr_name)\n mod_attr_val = dup_rec_list[mod_attr_name_index]\n\n # Select an attribute to modify according to probability\n # distribution of corruption methods\n #\n attr_mod_data_list = self.attr_mod_data_dict[mod_attr_name]\n\n r = random.random() # Random value between 0.0 and 1.0\n p_sum = attr_mod_data_list[0][0]\n i = 0\n while (r >= p_sum):\n i += 1\n p_sum += attr_mod_data_list[i][0]\n corruptor_method = attr_mod_data_list[i][1]\n\n # Modify the value from the selected attribute\n #\n new_attr_val = corruptor_method.corrupt_value(\n mod_attr_val)\n\n org_attr_val = rec_to_mod_list[mod_attr_name_index]\n\n # If the modified value is different insert it back into modified\n # record\n #\n if (new_attr_val != org_attr_val):\n f.write(' Selected attribute for modification:' + str(mod_attr_name) + \"\\n\")\n f.write(' Selected corruptor:' + str(corruptor_method.name) + \"\\n\")\n\n # The following weird string printing construct is to overcome\n # problems with printing non-ASCII characters\n #\n f.write(' Original attribute value:' + str([org_attr_val][1:-1]) + \"\\n\")\n f.write(' Modified attribute value:' + str([new_attr_val])[1:-1] + \"\\n\")\n\n dup_rec_list[mod_attr_name_index] = new_attr_val\n\n # One more modification for this attribute\n #\n attr_mod_count_dict[mod_attr_name] += 1\n\n # The number of modifications in a record corresponds to the\n # number of modified attributes\n #\n num_mod_in_record = 0\n\n for num_attr_mods in attr_mod_count_dict.values():\n if (num_attr_mods > 0):\n num_mod_in_record += 1 # One more modification\n assert num_mod_in_record <= self.num_mod_per_rec\n\n num_tries += 1 # One more try to modify record\n\n # Check if this duplicate is different from all others for this original\n # record\n #\n is_diff = True # Flag to check if the latest duplicate is different\n\n if (this_dup_rec_list == []): # No duplicate so far\n this_dup_rec_list.append(dup_rec_list)\n else:\n for check_dup_rec in this_dup_rec_list:\n if (check_dup_rec == dup_rec_list): # Same as a previous duplicate\n is_diff = False\n f.write('Same duplicate:' + str(check_dup_rec) + \"\\n\")\n f.write(' ' + str(dup_rec_list) + \"\\n\")\n\n if (is_diff == True): # Only keep duplicate records that are different\n\n # Safe the 
record into the overall record dictionary\n #\n rec_dict[dup_rec_id] = dup_rec_list\n\n d += 1\n num_dup_rec_created += 1\n\n f.write('Original record:\\n')\n f.write(' ' + str(rec_to_mod_list) + \"\\n\")\n f.write('Record with %d modified attributes\\n' %\n (num_mod_in_record),)\n attr_mod_str = '('\n for a in self.attribute_name_list:\n if (attr_mod_count_dict.get(a, 0) > 0):\n attr_mod_str += '%d in %s, ' % (\n attr_mod_count_dict[a], a)\n attr_mod_str = attr_mod_str[:-1]+'):'\n f.write(str(attr_mod_str) + \"\\n\")\n f.write(' ' + str(dup_rec_list) + \"\\n\")\n f.write('%d of %d duplicate records generated so far \\n' %\n (num_dup_rec_created, self.number_of_mod_records))\n f.write(\"\\n\")\n\n f.close()\n\n return rec_dict",
"def test_excessive_duplication(self):\n # Create w/o the api to intentionally create duplicates\n duplicates = []\n for x in range(100):\n pd_id = 'Pd_{}.{}'.format(x, self.student.id)\n pd = Pd(key_name=pd_id, id=pd_id, parent=self.student,\n scope=self.student.id, program=self.program.id,\n variable='condition', value='duplicate test', public=True)\n duplicates.append(pd)\n db.put(duplicates)\n\n # Prove that there are duplicates.\n duplicates = self.student_api.get('pd', {}, ancestor=self.student)\n self.assertEquals(len(duplicates), 100)\n\n # Attempt to delete the excessive duplicates, expecting an exception.\n with self.assertRaises(Exception):\n pd_id = 'Pd_101.{}'.format(self.student.id)\n pd = Pd(key_name=pd_id, id=pd_id, parent=self.student,\n scope=self.student.id, program=self.program.id,\n variable='condition', value='non-duplicate', public=True)\n Pd.delete_previous_versions(pd, self.student)",
"def testDuplicate(self,permutations=True):\n # This algorithm is faster than encode,\n # but for nplex=2 enmagic2 would probably still be faster.\n if permutations:\n C = self.copy()\n C.sort(axis=1)\n else:\n C = self\n ind = sortByColumns(C)\n C = C.take(ind,axis=0)\n ok = (C != roll(C,1,axis=0)).any(axis=1)\n if not ok[0]: # all duplicates -> should result in one unique element\n ok[0] = True\n return ind,ok",
"def test_add_dup(self):\n for i in range(3):\n self.datastore.save(self.trans)\n\n eq_(1, self.datastore._collection.count())",
"def test_has_duplicate():\n id_set = {\n 'Layouts': []\n }\n id_set['Layouts'].append({\n 'urlRep': {\n 'typeID': 'urlRep',\n 'fromVersion': '5.0.0',\n 'kind': 'Details',\n 'path': 'Layouts/layout-details-urlrep.json',\n 'pack': 'urlRep1'\n }\n })\n\n id_set['Layouts'].append({\n 'urlRep': {\n 'typeID': 'urlRep',\n 'kind': 'Details',\n 'path': 'Layouts/layout-details-urlrep2.json',\n 'pack': 'urlRep2'\n }\n })\n\n has_duplicates = has_duplicate(id_set['Layouts'], 'urlRep', 'Layouts', False)\n assert has_duplicates is True",
"def test_are_duplicates_length(self):\n rules = [\n pd.Series({\"A\": \"high\", \"B\": Bounds(lower=1, upper=1), \"C\": Bounds(lower=1, upper=1), \"Class\": \"apple\"},\n name=1),\n pd.Series({\"B\": Bounds(lower=1, upper=1), \"C\": Bounds(lower=1, upper=1),\n \"Class\": \"apple\"}, name=2)\n ]\n duplicate = _are_duplicates(rules[0], rules[1])\n self.assertTrue(duplicate is False)",
"def test_duplicate_entries(self):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
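The decision above reduces to: reset the per-record counters, then with probability `Prob_duplicate` mark the record and draw how many duplicates to make. A small self-contained sketch of that behaviour is below; the `DupDecider` class and its uniform `random_select_ndups` are hypothetical stand-ins for the generator's real configuration.

```python
import random

class DupDecider:
    def __init__(self, prob_duplicate, max_duplicate, seed=42):
        self.rng = random.Random(seed)
        self.prob_duplicate = prob_duplicate
        self.max_duplicate = max_duplicate
        self.expect = False
        self.maxdup = 0

    def random_select_ndups(self):
        # Placeholder: uniform choice of how many duplicates to emit.
        return self.rng.randint(1, self.max_duplicate)

    def expect_duplicate(self):
        # Reset everything for this record, then roll the dice.
        self.expect = False
        self.maxdup = 0
        if self.rng.random() < self.prob_duplicate:
            self.expect = True
            self.maxdup = self.random_select_ndups()

decider = DupDecider(prob_duplicate=0.3, max_duplicate=4)
marked = 0
for _ in range(10_000):
    decider.expect_duplicate()
    marked += decider.expect
print(marked)  # roughly 3000 of 10000 records get marked for duplication
```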
Generate the predictions of the original model on training and validation datasets. The original model is also trained if train = True. | def generate_original_preds(train = True):
x_train, y_train, x_val, y_val, id_to_word = load_data()
model = create_original_model()
if train:
filepath="models/original.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc',
verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
model.fit(x_train, y_train, validation_data=(x_val, y_val),callbacks = callbacks_list, epochs=epochs, batch_size=batch_size)
model.load_weights('./models/original.hdf5',
by_name=True)
pred_train = model.predict(x_train,verbose = 1, batch_size = 1000)
pred_val = model.predict(x_val,verbose = 1, batch_size = 1000)
if not train:
print('The val accuracy is {}'.format(calculate_acc(pred_val,y_val)))
print('The train accuracy is {}'.format(calculate_acc(pred_train,y_train)))
np.save('data/pred_train.npy', pred_train)
np.save('data/pred_val.npy', pred_val) | [
"def predict(self, X_train, y_train, test_predict=False):\n self.plot_sample = self.data_loader.get_plot_sample(X_train, y_train)\n self.number = int(math.sqrt(self.data_loader.square_number))\n\n for self.model in self.models:\n self.model.fit(X_train, y_train)\n self.model_predictions.append(self.model.predict(X_train))\n \n if test_predict:\n self.test_predictions.append(self.model.predict(self.data_loader.X_test))\n \n self.blend_sample_predictions()\n self.plot_sample_predictions()",
"def get_predictions(fitted_model_filename):\n click.echo(\"Mode: predicting probabilities.\\n\")\n defaults = get_defaults()\n\n fitted_model_filename = add_extension(fitted_model_filename)\n fitted_model_path = os.path.join(defaults.OUTPUT.FITTED_MODELS_PATH, fitted_model_filename)\n new_options = [\"OUTPUT.FITTED_MODEL_PATH\", fitted_model_path]\n\n # boot_data = bootstrap(new_options, mode=\"internal_test\")\n # model = boot_data['model']\n #\n # X_test_int, y_test_int = boot_data['data']\n # internal_test_proba = model.predict_proba(X_test_int)\n # internal_test_proba = np.c_[y_test_int, internal_test_proba[:, 1]]\n\n boot_data = bootstrap(new_options, mode=\"external_test\")\n model = boot_data['model']\n X_test_ext, y_test_ext = boot_data['data']\n\n # fit scaler on train data and transform test data\n scaler = StandardScaler()\n X_train, y_train = load_data(defaults, which='train')\n\n numeric_cols = X_train.select_dtypes(include=np.float64).columns.tolist()\n scaler.fit(X_train[numeric_cols])\n X_test_ext.loc[:, numeric_cols] = scaler.transform(X_test_ext[numeric_cols])\n\n external_test_proba = model.predict_proba(X_test_ext)\n external_test_proba = np.c_[y_test_ext, external_test_proba[:, 1]]\n\n # internal_test_results_path = os.path.join(defaults.OUTPUT.PREDS_PATH, \"internal_test_preds.csv\")\n external_test_results_path = os.path.join(defaults.OUTPUT.PREDS_PATH,\n f\"external_test_preds_{fitted_model_filename.replace('.pkl', '')}.csv\")\n # pd.DataFrame(internal_test_proba, columns=['target', 'proba']).to_csv(internal_test_results_path, index=False)\n pd.DataFrame(external_test_proba, columns=['target', 'proba']).to_csv(external_test_results_path, index=False)",
"def fit_predict(self):\n self.best_model = self.get_model(self.best_params, 'full')\n self.best_y_pred_future = self.predict(self.best_model, self.best_params, 'future')",
"def train_and_predict():\n x_kaggle, y_kaggle, = _load_kaggle_dataset(\"../datasets/kaggle/\")\n x_train_webcam, y_train_webcam = _load_dataset(\"../datasets/segmentation/\")\n\n x_kaggle.extend(x_train_webcam)\n y_kaggle.extend(y_train_webcam)\n\n x_kaggle = np.array(x_kaggle)\n y_kaggle = np.array(y_kaggle)\n\n model: Model = _create_model()\n model.summary()\n \n history = model.fit(\n x_kaggle, y_kaggle,\n epochs=150,\n batch_size=20,\n )\n model.save(\"saved_models/u-net\")",
"def fit_predict(self):\n # if self.best_params['use_date_featurizer']:\n # # If we trained the model with exogeneous array, we have to provide it again.\n # self.best_model.update(self.y_test, self.X_test)\n \n # else:\n # self.best_model.update(self.y_test)\n\n self.best_model.update(self.y_test, self.X_test)\n self.best_y_pred_future = self.predict(self.best_model, self.best_params, 'future')",
"def predict(model, X_testing):\n predictions = model.predict(X_testing)\n\n return predictions",
"def run_predictions(self):\n print \"Running the Predictions on the test dataset\"\n classifier = Classifier(self.train, self.test, self.targets, self.stratification, self.fts)\n _, y_pred_detail = classifier.classify_predict()\n self.output_file(y_pred_detail)",
"def fit_and_get_test_predictions(self, trace, tuning=True):\n trace_df = self.get_model_data_for_trace(trace)\n X_train, y_train, X_test, y_test = self.split_data(trace_df, tuning)\n self._fit(X_train, y_train)\n _, test_preds = self._get_train_and_test_predictions(X_train, X_test)\n return y_test, test_preds",
"def test_predict():\n predictions = model.predict(sentences)",
"def make_predictions(opts, model, dataset):\n if opts.num_examples_for_predict:\n dataset = tuple(x[:opts.num_examples_for_predict] for x in dataset)\n\n batched_dataset = (tf.data.Dataset.from_tensor_slices(dataset)\n .batch(_BATCH_SIZE_FOR_PREDICT))\n out = collections.defaultdict(list)\n for images, labels in tfds.as_numpy(batched_dataset):\n logits_samples = np.stack(\n [model.predict(images) for _ in range(opts.predictions_per_example)],\n axis=1) # shape: [batch_size, num_samples, num_classes]\n probs = scipy.special.softmax(logits_samples, axis=-1).mean(-2)\n out['labels'].extend(labels)\n out['logits_samples'].extend(logits_samples)\n out['probs'].extend(probs)\n if len(out['image_examples']) < _NUM_IMAGE_EXAMPLES_TO_RECORD:\n out['image_examples'].extend(images)\n\n return {k: np.stack(a) for k, a in six.iteritems(out)}",
"def fit_and_get_test_predictions(self, trace, tuning=True):\n pass",
"def _predict(self, test, train=None, decision_func=False):\n if train and not self._trained: \n self.fit(train)\n label_set = list(train.label_names.values())\n test_len = len(test.texts)\n if self.baseline == 'random':\n predictions = np.random.choice(label_set, size=test_len)\n elif self.baseline == 'majority':\n predictions = test_len * [Counter(train.labels).most_common(1)[0][0]]\n elif self.baseline == 'random_sample':\n prob = Counter(train.labels) \n prob = np.array([prob[l] for l in label_set]) / sum(prob.values())\n predictions = np.random.choice(label_set,\n size=test_len,\n p=prob)\n if decision_func:\n return predictions, None\n return predictions",
"def fit_predict(self, y, train=None, predict=None, show_steps=True):\r\n y_train = y[train] if train is not None else y\r\n if train is not None and predict is None:\r\n predict = [i for i in range(len(y)) if i not in train]\r\n\r\n stage0_train = []\r\n stage0_predict = []\r\n for model, feature_set in self.models:\r\n X_train, X_predict = get_dataset(feature_set, train, predict)\r\n\r\n identifier = train[0] if train is not None else -1\r\n cache_file = stringify(model, feature_set) + str(identifier)\r\n\r\n model_preds = self._get_model_preds(\r\n model, X_train, X_predict, y_train, cache_file)\r\n stage0_predict.append(model_preds)\r\n\r\n # if stacking, compute cross-validated predictions on the train set\r\n if self.stack:\r\n model_cv_preds = self._get_model_cv_preds(\r\n model, X_train, y_train, cache_file)\r\n stage0_train.append(model_cv_preds)\r\n\r\n # verbose mode: compute metrics after every model computation\r\n if show_steps:\r\n if train is not None:\r\n mean_preds, stack_preds, fwls_preds = self._combine_preds(\r\n np.array(stage0_train).T, np.array(stage0_predict).T,\r\n y_train, train, predict,\r\n stack=self.stack, fwls=self.fwls)\r\n\r\n model_auc = compute_auc(y[predict], stage0_predict[-1])\r\n mean_auc = compute_auc(y[predict], mean_preds)\r\n stack_auc = compute_auc(y[predict], stack_preds) \\\r\n if self.stack else 0\r\n fwls_auc = compute_auc(y[predict], fwls_preds) \\\r\n if self.fwls else 0\r\n\r\n logger.info(\r\n \"> AUC: %.4f (%.4f, %.4f, %.4f) [%s]\", model_auc,\r\n mean_auc, stack_auc, fwls_auc,\r\n stringify(model, feature_set))\r\n else:\r\n logger.info(\"> used model %s:\\n%s\", stringify(\r\n model, feature_set), model.get_params())\r\n\r\n if self.model_selection and predict is not None:\r\n best_subset = self._find_best_subset(y[predict], stage0_predict)\r\n stage0_train = [pred for i, pred in enumerate(stage0_train)\r\n if i in best_subset]\r\n stage0_predict = [pred for i, pred in enumerate(stage0_predict)\r\n if i in best_subset]\r\n\r\n mean_preds, stack_preds, fwls_preds = self._combine_preds(\r\n np.array(stage0_train).T, np.array(stage0_predict).T,\r\n y_train, stack=self.stack, fwls=self.fwls)\r\n\r\n if self.stack:\r\n selected_preds = stack_preds if not self.fwls else fwls_preds\r\n else:\r\n selected_preds = mean_preds\r\n\r\n return selected_preds",
"def save_predictions(self, split='test'):\n self.temporal_model.eval()\n print('Getting model predictions for {}-set ...'.format(split))\n dataloader = self.ERA5_data.dataloader_dict[split]\n with torch.no_grad():\n y_pred_series = []\n y_true_series = []\n inp_dates_seq = []\n out_dates_seq = []\n for idx, sample in enumerate(dataloader):\n x, y_true, dates = sample[0], sample[1], sample[2]\n if self.trainer_params.use_cuda and torch.cuda.is_available():\n for key in x.keys():\n x[key] = x[key].cuda()\n y_true = y_true.cuda()\n #y_true = self.spatial_embedding_model_dict['t2m'](y_true)\n y_true_series.append(y_true)\n y_pred_series.append(self.predict(x))\n inp_dates_seq.append(dates[0])\n out_dates_seq.append(dates[1])\n y_true_series = torch.cat(y_true_series, dim=0).cpu().detach().numpy()\n y_pred_series = torch.cat(y_pred_series, dim=0).cpu().detach().numpy()\n inp_dates_seq = torch.cat(inp_dates_seq, dim=0).cpu().detach().numpy()\n out_dates_seq = torch.cat(out_dates_seq, dim=0).cpu().detach().numpy()\n save_preds (\n y_true_series, y_pred_series,\n inp_dates_seq, out_dates_seq,\n self.scale_dict['t2m'],\n self.dataloader_params,\n split\n )",
"def finaltrain_and_predict(model_hps, optimizer_hps, train_hps, X_train, y_train, X_test):\n net = get_net(*model_hps)\n net.apply(init_weights)\n optimizer = optimize(net, *optimizer_hps)\n loss = nn.MSELoss() # define the loss function\n eval_metrics_train, _ = train(net, optimizer, loss, X_train, None, y_train, None, *train_hps)\n\n print(f'train log rmse: {eval_metrics_train[-1]}')\n plot(list(range(1, train_hps[0] + 1)), [eval_metrics_train],\n xlabel='epoch', ylabel='rmse', xlim=[1, train_hps[0]],\n legend=['train', 'valid'], yscale='linear')\n\n preds = net(X_test).detach().numpy()\n # Reformat it to export to Kaggle\n test = pd.read_csv('test.csv')\n test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test['Id'], test['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)",
"def do_resume_predict(self):\n pass",
"def _predict(self):\n\n resource_package = __name__ \n resource_path = '/'.join((_TRAINED_MODELS_DIR, self._model_path))\n path = pkg_resources.resource_filename(resource_package, resource_path)\n\n with tf.gfile.GFile(path, \"rb\") as f:\n restored_graph_def = tf.GraphDef()\n restored_graph_def.ParseFromString(f.read())\n\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(restored_graph_def, input_map=None, return_elements=None, name=\"\")\n\n sample_keep_prob = graph.get_tensor_by_name('keep_probs/sample_keep_prob:0')\n conv_keep_prob = graph.get_tensor_by_name('keep_probs/conv_keep_prob:0')\n is_training = graph.get_tensor_by_name('is_training:0')\n X = graph.get_tensor_by_name('sample/X:0')\n\n # add hook to output operation\n pred_cls = graph.get_tensor_by_name('predictions/ArgMax:0')\n\n with tf.Session(graph=graph) as sess:\n feed_dict = {sample_keep_prob : 1.,\n conv_keep_prob : 1.,\n is_training : False,\n X: self.data}\n\n # collect prediction\n self._pred_cls = sess.run(pred_cls, feed_dict = feed_dict)\n\n sess.close()",
"def after_pred(self):\n # If training then skip\n if self.training:\n return\n\n # Get ground truths in epoch 0 i.e. start of training\n if self.epoch == 0:\n self.y_true.extend(self.y.cpu().flatten().numpy())\n\n # Get predictions from each batch and add them to prediction container\n y_pred = self.pred.detach().cpu()\n \n self.y_pred.extend(y_pred.flatten().numpy())",
"def predict(self, test_data):\n random.seed(self.seed)\n preds = [{\"id\": instance['id'], \"prediction\": random.choice([0, 1])} for instance in test_data]\n return preds"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The managed object reference ID of the root resource pool for the cluster. | def resource_pool_id(self) -> str:
return pulumi.get(self, "resource_pool_id") | [
"def pool_id(self):\n return self._pool_id",
"def instance_pool_id(self) -> str:\n return pulumi.get(self, \"instance_pool_id\")",
"def cluster_resource_id(self) -> str:\n return pulumi.get(self, \"cluster_resource_id\")",
"def managed_object_id(self):\n o = self._data[\"managed_object\"]\n if type(o) in (int, long):\n return o\n return o.id",
"def get_pool_id():\n current = multiprocessing.current_process()\n return current._identity[0]-1",
"def identity_pool_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"identity_pool_id\")",
"def parent_cluster_resource_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"parent_cluster_resource_id\")",
"def get_ceph_object_pool_name(self, context):\n return self._ceph.get_ceph_object_pool_name()",
"def _pool_id(self, queue, project=None):\n return self._catalogue_ctrl.get(project, queue)['pool']",
"def root_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"root_id\")",
"def dsspool_id(self):\n return self._dsspool_id",
"def reference_id(self) -> str:\n return pulumi.get(self, \"reference_id\")",
"def get_parentID(self):\n parent = Collection.find(self.resource.container)\n return parent.uuid",
"def getId(self):\n return _libsbml.CompartmentReference_getId(self)",
"def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")",
"def get_aggregate_root_id(self):\n return self.__aggregate_root_id",
"def root_account_id(self):\n return self._root_account_id",
"def parent_cluster_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"parent_cluster_resource_id\")",
"def container_registry_managed_identity_client_id(self) -> str:\n return pulumi.get(self, \"container_registry_managed_identity_client_id\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The `ComputeCluster` data source can be used to discover the ID of a cluster in vSphere. This is useful to fetch the ID of a cluster that you want to use for virtual machine placement via the `VirtualMachine` resource, allowing you to specify the cluster's root resource pool directly versus using the alias available through the `ResourcePool` data source. > You may also wish to see the `ComputeCluster` resource for more information about clusters and how to manage the resource in this provider. Example Usage ```python import pulumi import pulumi_vsphere as vsphere datacenter = vsphere.get_datacenter(name="dc01") compute_cluster = vsphere.get_compute_cluster(name="cluster01", datacenter_id=datacenter.id) ``` | def get_compute_cluster(datacenter_id: Optional[str] = None,
name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetComputeClusterResult:
__args__ = dict()
__args__['datacenterId'] = datacenter_id
__args__['name'] = name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('vsphere:index/getComputeCluster:getComputeCluster', __args__, opts=opts, typ=GetComputeClusterResult).value
return AwaitableGetComputeClusterResult(
datacenter_id=pulumi.get(__ret__, 'datacenter_id'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
resource_pool_id=pulumi.get(__ret__, 'resource_pool_id')) | [
"def get_cluster_id(options):\n cluster = options.cluster\n datacenter = get_datacenter(options)\n for item in datacenter.hostFolder.childEntity:\n if (item.name == cluster):\n return item._GetMoId()",
"def get_cluster_by_id(self, context, cluster_id):",
"def cluster_id(self):\n node = self.get_node()\n try:\n return node.conf_get(\"cluster\", \"id\")\n except Exception as exc:\n pass\n import uuid\n cluster_id = str(uuid.uuid1())\n from cluster import ClusterSvc\n svc = ClusterSvc()\n svc.set_multi([\"cluster.id=\"+cluster_id], validation=False)\n return cluster_id",
"def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")",
"def cluster_resource_id(self) -> str:\n return pulumi.get(self, \"cluster_resource_id\")",
"def find_cluster(self, id: str) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def cluster(self, v):\n if 'cluster' in self.model.topology.node[v]:\n return self.model.topology.node[v]['cluster']\n else:\n return None",
"def cluster_id(self):\n return self._cluster_id",
"def get_cluster(self, name):\n return clusters.get_cluster(self, name)",
"def get_id(self, name):\n for cluster in self.list():\n if name == cluster['name']:\n return cluster['_id']\n raise NotFound(\"K8s cluster {} not found\".format(name))",
"def find_cluster(self, id):\n raise NotImplementedError",
"def get_cluster(name: str) -> dict:\n return ECS.get_clusters([name])[0]",
"def find_kubernetes_cluster(self, id: str) -> dto.KubernetesCluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]",
"def get_coe_cluster(self, name_or_id, filters=None):\n return _utils._get_entity(self, 'coe_cluster', name_or_id, filters)",
"def get_nodes_in_cluster(self, context, cluster_id):",
"def get_cluster(self, request, params):\n cluster_config = resource_cluster_config.filter_list(\n src_cluster_id=params[\"src_cluster_id\"],\n cluster_type=params[\"cluster_type\"],\n resource_type=params[\"resource_type\"],\n service_type=params[\"service_type\"],\n geog_area_code=params[\"geog_area_code\"],\n ).first()\n if cluster_config:\n return Response(model_to_dict(cluster_config))\n else:\n return Response(None)",
"def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")",
"def get_one_cluster_by_name(ctx, cluster_name, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].get()\n pprint(cluster.data)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
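Following on from the example embedded in the docstring above, the fetched `resource_pool_id` is typically what gets fed into a `VirtualMachine` resource for placement. The sketch below shows that wiring only; it is not a complete machine definition, and the remaining required arguments (datastore, guest ID, disks, network interfaces, and so on) are deliberately elided.

```python
import pulumi
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc01")
compute_cluster = vsphere.get_compute_cluster(
    name="cluster01",
    datacenter_id=datacenter.id,
)

# Place the VM directly in the cluster's root resource pool.
vm = vsphere.VirtualMachine(
    "vm01",
    resource_pool_id=compute_cluster.resource_pool_id,
    # ... datastore_id, guest_id, disks, network_interfaces, etc. omitted here
)
```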
Test addition for Complex with Complex, complex, int and float | def test_add():
z = Complex(1, -2)
w = Complex(1, 1)
assert (z + w) == Complex(2, -1)
assert (z + (1+1j)) == Complex(2, -1)
assert (z + 2) == Complex(3, -2)
assert (z + 2.0) == Complex(3, -2) | [
"def complex_sum(c_1,c_2):\n return c_1 + c_2",
"def __add__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum + other, self._imNum)\n if isinstance(other, complex):\n return Complex(self._reNum + other.real, self._imNum + other.imag)\n return Complex(self._reNum + other._reNum, self._imNum + other._imNum)",
"def __add__(self, other):\n if isinstance(self, int):\n return Complex(other.real + self, other.imag)\n else:\n return Complex(self.real + other.real, self.imag + other.imag)",
"def __add__(self,other):\n\t\treal = self.realPart + other.realPart\n\t\timaginary = self.imaginaryPart + other.imaginaryPart\n\n\t\t#create and return new complexnumber\n\t\treturn real,imaginary",
"def __add__(self, other):\n self.sum_complex_num = Complex((self.real + other.real), (self.imaginary + other.imaginary))\n return self.sum_complex_num",
"def test_ComplexNumber(a, b):\n py_cnum, my_cnum = complex(a, b), ComplexNumber(a, b)\n c = random.randint(-100, 101)\n d = random.randint(-100, 101)\n py_cnum2, my_cnum2 = complex(c, d), ComplexNumber(c, d)\n\n # Creating new complex objects to test operations\n my_add = my_cnum + my_cnum2\n py_add = py_cnum + py_cnum2\n my_sub = my_cnum - my_cnum2\n py_sub = py_cnum - py_cnum2\n my_mul = my_cnum * my_cnum2\n py_mul = py_cnum * py_cnum2\n my_div = my_cnum / my_cnum2\n py_div = py_cnum / py_cnum2\n\n # Validate the constructor.\n if my_cnum.real != a or my_cnum.imag != b:\n print(\"__init__() set self.real and self.imag incorrectly\")\n # Validate conjugate() by checking the new number's imag attribute.\n if py_cnum.conjugate().imag != my_cnum.conjugate().imag:\n print(\"conjugate() failed for\", py_cnum)\n # Validate __str__().\n if str(py_cnum) != str(my_cnum):\n print(\"__str__() failed for\", py_cnum)\n # Validate __abs__().\n if abs(my_cnum) != abs(py_cnum):\n print(\"__abs__() failed for\", py_cnum)\n # Validate __eq__().\n if my_cnum != my_cnum:\n print(\"conjugate() failed for\", py_cnum)\n # Validate __add__().\n if my_add.real != py_add.real or my_add.imag != py_add.imag:\n print(\"__add__() failed for\", py_cnum)\n # Validate __sub__().\n if my_sub.real != py_sub.real or my_sub.imag != py_sub.imag:\n print(\"__sub__() failed for\", py_cnum)\n # Validate __mul__().\n if my_mul.real != py_mul.real or my_mul.imag != py_mul.imag:\n print(\"__mul__() failed for\", py_cnum)\n # Validate __truediv__().\n if my_div.real != py_div.real or my_div.imag != py_div.imag:\n print(\"__div__() failed for\", py_cnum)\n return",
"def complex(real, imag):",
"def test_complex_number():\r\n number1 = ComplexNumber(1, -2)\r\n number2 = ComplexNumber(7.2, 0)\r\n\r\n assert number1.real_part == 1\r\n assert number1.imaginary_part == -2\r\n\r\n assert number2.real_part == 7.2\r\n assert number2.imaginary_part == 0",
"def sum(self, other):\n return ComplexNumber(self.real + other.real, self.imaginary + other.imaginary)",
"def _radd(lhs,rhs):\n if isinstance(lhs,numbers.Real): \n if lhs == 0.0:\n return rhs\n else:\n return UncertainReal(\n float.__add__(float(lhs),rhs.x)\n , vector.scale_vector(rhs._u_components,1.0)\n , vector.scale_vector(rhs._d_components,1.0)\n , vector.scale_vector(rhs._i_components,1.0)\n )\n \n elif isinstance(lhs,numbers.Complex):\n if lhs == 0.0:\n r = +rhs\n i = UncertainReal._constant(0.0)\n else:\n r = lhs.real + rhs \n i = UncertainReal._constant(lhs.imag)\n \n # Addition of a complex changes the type\n return UncertainComplex(r,i)\n \n else:\n assert False, 'unexpected'",
"def _add(lhs,rhs):\n if isinstance(rhs,UncertainReal):\n \n return UncertainReal(\n lhs.x + rhs.x\n , vector.merge_vectors(lhs._u_components,rhs._u_components)\n , vector.merge_vectors(lhs._d_components,rhs._d_components)\n , vector.merge_vectors(lhs._i_components,rhs._i_components)\n )\n \n elif isinstance(rhs,numbers.Real):\n if rhs == 0.0:\n return lhs\n else:\n return UncertainReal(\n lhs.x + rhs\n , vector.scale_vector(lhs._u_components,1.0)\n , vector.scale_vector(lhs._d_components,1.0)\n , vector.scale_vector(lhs._i_components,1.0)\n )\n elif isinstance(rhs,numbers.Complex):\n if rhs == 0.0:\n r = +lhs \n i = UncertainReal._constant(0.0)\n else:\n r = lhs + rhs.real \n i = UncertainReal._constant(rhs.imag)\n \n return UncertainComplex(r,i)\n \n else:\n assert False, 'unexpected'",
"def test_add_circle():\n c1 = Circle(2)\n c2 = Circle(4)\n c3 = c1 + c2\n assert c3.radius == 6\n # Adding another number\n c4 = c1 + 3\n assert c4.radius == 5\n # Reversing the order\n c5 = 1 + c1\n assert c5.radius == 3\n # adding an non-numeric\n with pytest.raises(TypeError):\n c6 = c1 + '10'",
"def test_addition():\n assert calculator.add(7, 3) == 10\n assert calculator.add(7.0, 3.0) == 10.0\n assert calculator.add(7, -3) == 4\n assert calculator.add(7.0, -3.0) == 4.0",
"def test_add_case2():\n q1 = session4.Qualean(random.sample(qualean_inputs, 1)[0])\n q2 = session4.Qualean(random.sample(qualean_inputs, 1)[0])\n q3 = session4.Qualean(random.sample(qualean_inputs, 1)[0])\n q_temp = q1 + q2 + q3\n\n q_sum = round(q1.number + q2.number + q3.number, 10)\n assert q_sum == q_temp.number, 'Sum implementation is buggy'",
"def test_adding_many_does_not_auto_simplify(self):\n op0, op1, op2 = qml.S(0), qml.T(0), qml.PauliZ(0)\n op = op0 + op1 + op2\n assert isinstance(op, Sum)\n assert len(op) == 2\n assert isinstance(op[0], Sum)\n assert qml.equal(op[0][0], op0)\n assert qml.equal(op[0][1], op1)\n assert qml.equal(op[1], op2)",
"def test_add_floats(self):\n\n result = calc2.Calc().add(10.5, 2)\n self.assertAlmostEqual(result, 12.5, 5)",
"def test_C_NotComplex(self):\n\t\tself.assertRaises(calc.NotComplexError, calc.it, M([[1 + 1j]]), 1, 10)",
"def is_complex(num):\n try:\n complex(num)\n except Exception:\n return False\n return True",
"def test_add_float(self):\n self.assertAlmostEqual(cr.add(2.21, 4.7), 2.21 + 4.7, places=2)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test subtraction for Complex with Complex, complex, int and float | def test_sub():
z = Complex(1, -2)
w = Complex(1, 1)
assert (z - w) == Complex(0, -3)
assert (z - (1+1j)) == Complex(0, -3)
assert (z - 2) == Complex(-1, -2)
assert (z - 2.0) == Complex(-1, -2) | [
"def complex_difference(c_1,c_2):\n return c_1 - c_2",
"def test_complex_number():\r\n number1 = ComplexNumber(1, -2)\r\n number2 = ComplexNumber(7.2, 0)\r\n\r\n assert number1.real_part == 1\r\n assert number1.imaginary_part == -2\r\n\r\n assert number2.real_part == 7.2\r\n assert number2.imaginary_part == 0",
"def test_ComplexNumber(a, b):\n py_cnum, my_cnum = complex(a, b), ComplexNumber(a, b)\n c = random.randint(-100, 101)\n d = random.randint(-100, 101)\n py_cnum2, my_cnum2 = complex(c, d), ComplexNumber(c, d)\n\n # Creating new complex objects to test operations\n my_add = my_cnum + my_cnum2\n py_add = py_cnum + py_cnum2\n my_sub = my_cnum - my_cnum2\n py_sub = py_cnum - py_cnum2\n my_mul = my_cnum * my_cnum2\n py_mul = py_cnum * py_cnum2\n my_div = my_cnum / my_cnum2\n py_div = py_cnum / py_cnum2\n\n # Validate the constructor.\n if my_cnum.real != a or my_cnum.imag != b:\n print(\"__init__() set self.real and self.imag incorrectly\")\n # Validate conjugate() by checking the new number's imag attribute.\n if py_cnum.conjugate().imag != my_cnum.conjugate().imag:\n print(\"conjugate() failed for\", py_cnum)\n # Validate __str__().\n if str(py_cnum) != str(my_cnum):\n print(\"__str__() failed for\", py_cnum)\n # Validate __abs__().\n if abs(my_cnum) != abs(py_cnum):\n print(\"__abs__() failed for\", py_cnum)\n # Validate __eq__().\n if my_cnum != my_cnum:\n print(\"conjugate() failed for\", py_cnum)\n # Validate __add__().\n if my_add.real != py_add.real or my_add.imag != py_add.imag:\n print(\"__add__() failed for\", py_cnum)\n # Validate __sub__().\n if my_sub.real != py_sub.real or my_sub.imag != py_sub.imag:\n print(\"__sub__() failed for\", py_cnum)\n # Validate __mul__().\n if my_mul.real != py_mul.real or my_mul.imag != py_mul.imag:\n print(\"__mul__() failed for\", py_cnum)\n # Validate __truediv__().\n if my_div.real != py_div.real or my_div.imag != py_div.imag:\n print(\"__div__() failed for\", py_cnum)\n return",
"def complex(real, imag):",
"def __sub__(self,other):\n\t\treal = self.realPart - other.realPart\n\t\timaginary = self.imaginaryPart - other.imaginaryPart\n\n\t\t#create and return complexNumber\n\t\treturn real,imaginary",
"def __complex__(self) -> complex:\n return self._translate_in_type(complex, self.integer, self.float_num)",
"def test_add():\n z = Complex(1, -2)\n w = Complex(1, 1)\n assert (z + w) == Complex(2, -1)\n assert (z + (1+1j)) == Complex(2, -1)\n assert (z + 2) == Complex(3, -2)\n assert (z + 2.0) == Complex(3, -2)",
"def test_subtraction():\n assert calculator.subtract(7, 3) == 4\n assert calculator.subtract(7.0, 3.0) == 4.0\n assert calculator.subtract(7, -3) == 10\n assert calculator.subtract(7.0, -3.0) == 10.0",
"def complex_sum(c_1,c_2):\n return c_1 + c_2",
"def test_C_NotComplex(self):\n\t\tself.assertRaises(calc.NotComplexError, calc.it, M([[1 + 1j]]), 1, 10)",
"def test_complex(self):\r\n z1 = dict(z=1+1j,u=(1,1),label='z1')\r\n uz1 = ucomplex(**z1)\r\n \r\n z2 = dict(z=2-1j,u=(.5,.5),label='z2')\r\n uz2 = ucomplex(**z2)\r\n \r\n x1 = dict(x=1,u=.1,label='x1')\r\n ux1 = ureal(**x1)\r\n\r\n y = -uz1 + uz2* ux1\r\n \r\n b = reporting.budget(y)\r\n self.assertEqual( len(b), 3)\r\n\r\n self.assertTrue( equivalent(b[0].u,1.0,TOL) )\r\n self.assertTrue( equivalent(b[1].u,0.5,TOL) )\r\n self.assertTrue( equivalent(b[2].u,math.sqrt((.1**2 + .2**2)/2),TOL) )\r\n\r\n b = reporting.budget(y,influences=[ux1,uz1])\r\n self.assertEqual( len(b), 2)\r\n \r\n self.assertTrue( equivalent(b[0].u,1.0,TOL) )\r\n self.assertTrue( equivalent(b[1].u,math.sqrt((.1**2 + .2**2)/2),TOL) )",
"def complex_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, numbers.Complex):\n name = type(var).__name__\n raise ComplexError(\n 'Function {} expected complex number, {} got instead.'.format(func, name))",
"def __complex__(self): \n return complex(self.real, self.imag)",
"def check_type_not_complex(cls, number: Number) -> None:\n if isinstance(number, complex):\n print(\"Calculator supports arithmetic only with integers\",\n \"and floats but not with complex numbers\")\n return False\n return True",
"def test_subtract_less_zero(self):\n result = calculation.subtract(-5,-6)\n self.assertEqual(result,1)",
"def CHECK_REAL_OR_COMPLEX(testee):\n try:\n COMPLEXTYPE(testee)\n except TypeError:\n\t# not a complex number\n raise\n try:\n FLOATTYPE(testee)\n return 1\n except TypeError:\n return 0",
"def is_complex(num):\n try:\n complex(num)\n except Exception:\n return False\n return True",
"def _complex_abs(x, Tout=None, name=None):\n result = _op_def_lib.apply_op(\"ComplexAbs\", x=x, Tout=Tout, name=name)\n return result",
"def enforceComplex(prompt):\n testInput = input(prompt)\n testInput = testInput.replace(\" \", \"\")\n try:\n return complex(testInput)\n except:\n return f'Input \"{testInput}\" cannot be converted into a complex number'"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute LDA model & find perplexity, save topics list for coherence calc | def lda_models(doc_term_matrix, n_topics, vectorizer, rand_start):
perplexity_values = []
lda_time = []
topics_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
lda_model = LatentDirichletAllocation(n_components=num_topics, doc_topic_prior = 1/num_topics,
topic_word_prior=0.1, n_jobs=39, random_state = i)
lda_model.fit_transform(doc_term_matrix)
t2 = time.time()
lda_time.append(t2-t1)
print(f" Model time: {t2-t1}", flush = True)
# compute perplexity
perplexity_values.append(lda_model.bound_)
# create list of topics
topics = list_topics(lda_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
print('Number of topics =', num_topics, "complete.", flush = True)
return perplexity_values, lda_time, topics_list | [
"def docs_to_topic(X):\n\n#---------------------------\n def step(k):\n # TODO prior\n #lda = LdaModel(corpus=X, num_topics = k, alpha=prior, random_state=0)\n lda = LdaModel(corpus=X, num_topics = k, random_state=0)\n preds = lda[X]\n argmax = [ max(topics, key=itemgetter(1))[0] for topics in preds]\n topics, topic_counts = np.unique(argmax, return_counts=True)\n return topics, topic_counts, argmax\n\n k = len(X) # init: k=N\n prev = np.zeros(k)\n i = 1\n\n # Find starting topics\n print(f\"Iteration {i}: running with k={k}\")\n # first run with a symmetric alpha prior\n lda = LdaModel(corpus=X, num_topics = k, alpha='symmetric', random_state=0)\n preds = lda[X]\n argmax = [ max(topics, key=itemgetter(1))[0] for topics in preds]\n topics, topic_counts = np.unique(argmax, return_counts=True)\n\n # Convergence criterium is only a heuristic w.r.t. the paper\n # Checks topic counts instead of ensuring all document per topic stay the same \n while not np.array_equal(prev, topic_counts):\n i += 1\n prev = topic_counts\n k = len(topics) \n print(f\"Iteration {i}: running with k={k}\")\n topics, topic_counts, argmax = step(k)\n\n print(f\"Converged. {len(topics)} topics.\")\n print(f\"Topic counts: {topic_counts}\")\n print(f\"Topic assignments: {argmax}\")\n\n docs_per_topic = defaultdict(list)\n\n # Create a dictionary with docs per topic\n for topic_id in topics:\n # Uncommented following line if you want to return actual docs\n #docs_per_topic[ topic_id ].append(X[np.where(argmax == topic_id)])\n docs_per_topic[ topic_id ].append(np.where(argmax == topic_id))\n return docs_per_topic",
"def fit_lda_model(self):\n self.id2word = corpora.Dictionary(self.documents)\n self.id2word.filter_extremes(no_below=20, no_above=0.5)\n corpus = [self.id2word.doc2bow(text) for text in self.documents]\n alpha = list(np.arange(0.1, 1, 0.3))\n alpha.append(\"symmetric\")\n beta = copy.deepcopy(alpha)\n alpha.append(\"asymmetric\")\n corpus_sets = [gensim.utils.ClippedCorpus(corpus, int(len(corpus) * 0.75)), corpus]\n corpus_titles = [\"75% corpus\", \"100% corpus\"]\n model_results = {\"Validation_set\": [], \"Topics\": [], \"Alpha\": [], \"Beta\": [], \"Coherence\": []}\n print(\"Fitting models\")\n for i, corpus_set in enumerate(corpus_sets):\n for num_topics in self.topics_to_test:\n for a in alpha:\n for b in beta:\n lda_model = gensim.models.LdaMulticore(corpus=corpus_set, id2word=self.id2word, alpha=a,\n random_state=100, chunksize=100, passes=20,\n num_topics=num_topics,\n per_word_topics=True, minimum_probability=0, eta=b)\n if i == 1: # we only want to save the model if it's a model on the whole corpus\n if not os.path.exists(f\"data/intermediate/hyperparameter_testing\"):\n os.mkdir(f\"data/intermediate/hyperparameter_testing\")\n with open(f\"data/intermediate/hyperparameter_testing/lda_{num_topics}_\"\n f\"topics{a}_alpha_{b}_eta.pkl\", \"wb\") as file_out:\n pickle.dump(lda_model, file_out)\n coherence_model_lda = CoherenceModel(model=lda_model, texts=self.documents,\n dictionary=self.id2word, coherence='c_v')\n coherence = coherence_model_lda.get_coherence()\n print(f\"Topic {num_topics}, alpha {a} eta {b} corpus {corpus_titles[i]} coherence: {coherence}\")\n model_results['Validation_set'].append(corpus_titles[i])\n model_results['Topics'].append(num_topics)\n model_results['Alpha'].append(a)\n model_results['Beta'].append(b)\n model_results['Coherence'].append(coherence)\n pd.DataFrame(model_results).to_csv(\"hyperparamter_tuning_results.csv\", index=False)",
"def lda(colData):\n colData=normalize_corpus(colData)\n #Create Bigrams and Trigrams\n bigram = gensim.models.Phrases(colData, min_count=5,threshold = 100) #higher threshold fewer phrases\n trigram = gensim.models.Phrases(bigram[colData],threshold=100)\n\n #faster way to get sentence clubbed as trigtam/bigram\n bigram_mod = gensim.models.phrases.Phraser(bigram)\n trigram_mod=gensim.models.phrases.Phraser(trigram)\n\n data_word_bigrams = make_bigrams(colData,bigram_mod)\n\n\n #Create dictionary\n id2word = corpora.Dictionary(data_word_bigrams)\n\n #Create corpus\n texts = data_word_bigrams\n\n #term doc frequency\n corpus = [id2word.doc2bow(text) for text in texts]\n\n topics = get_num_topics(corpus, id2word, data_word_bigrams) \n #print(\"corpus:\",corpus[:1])\n #building the topic model\n lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=id2word,num_topics=10,random_state=100,\n update_every=1, chunksize=100,passes=10,alpha='auto',per_word_topics=True)\n\n pprint(lda_model.print_topics())\n #pp = lda_model.print_topics()\n doc_lda = lda_model[corpus]\n\n # Compute Perplexity\n print('\\n Model Perplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.\n\n # Compute Coherence Score\n coherence_model_lda = CoherenceModel(model=lda_model, texts= data_word_bigrams, dictionary=id2word, coherence='c_v')\n coherence_lda = coherence_model_lda.get_coherence()\n print('\\n Model Coherence Score: ', coherence_lda)\n \n df_topic_sents_keywords = sentence_topic(lda_model, corpus, colData)\n \n # Format\n df_dominant_topic = df_topic_sents_keywords.reset_index()\n df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']\n\n #Most representative Sentence per topic\n # Display setting to show more characters in column\n pd.options.display.max_colwidth = 100\n\n output = pd.DataFrame()\n sent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')\n\n\n for i, grp in sent_topics_outdf_grpd:\n output = pd.concat([output, grp.sort_values(['Perc_Contribution'], ascending=False).head(1)], \n axis=0)\n\n # Reset Index \n output.reset_index(drop=True, inplace=True)\n # Format\n output.columns = ['Topic_Num', \"Topic_Perc_Contrib\", \"Keywords\", \"Representative Text\"]\n \n #create most_rep_sentence column\n ls=[]\n for topic in df_dominant_topic['Dominant_Topic']:\n text=list(output['Representative Text'][output['Topic_Num']==topic])\n ls.append(text[0])\n #print(\"type: \", type(ls[0]))\n ls=pd.Series(ls)\n df_dominant_topic['Most Representative Text for Topic']=ls\n \n \n\n# Format\n return df_dominant_topic",
"def _lda(self):\n self.ldamodel = gensim.models.ldamodel.LdaModel(self.gensim_corpus, \n num_topics=self.n_topics, \n id2word=self.id_map, \n passes=self.n_passes,\n random_state=42)\n \n self.topic_matrix = self.ldamodel.print_topics(num_topics=self.n_topics, \n num_words=self.n_words)",
"def run_lda_and_compute_perplexity(save_path, documents, corpus, vocab, K, alpha, eta):\n lda_model = run_lda(save_path,\n documents,\n corpus,\n vocab,\n \"generated_files/\",\n (K, alpha, eta))\n return lda_model.log_perplexity(corpus)",
"def train_lda_topic_model_with_mallet(texts, path_mallet,\n terms_to_remove=[], num_topics=50,\n no_below=10, no_above=0.9,\n scoring=False, start=2, step=3):\n preprocessed_corpus = []\n print ('training of gensim corpus began')\n for i, text in enumerate(texts):\n if i == 0:\n # todo filter here\n text = text.split()\n\n # Additional filtering steps #\n \"\"\"\n filtered_text = [word for word in text if (word[0] in\n string.ascii_uppercase + string.ascii_lowercase)]\n\n filtered_text = [word for word in filtered_text if\n (word not in set(stopwords.words('english')))]\n preprocessed_corpus.append(filtered_text)\n \"\"\"\n\n dct = initialize_gensim_dictionary([text])\n else:\n text = text.split()\n # Additional filtering steps\n\n \"\"\"\n filtered_text = [word for word in text if (word[0] in\n string.ascii_uppercase + string.ascii_lowercase)]\n\n filtered_text = [word for word in filtered_text if\n (word not in set(stopwords.words('english')))]\n preprocessed_corpus.append(filtered_text)\n \"\"\"\n add_documents_to_gensim_dictionary(dct, [text])\n # todo:this is to be integrated to the building process\n\n if len(terms_to_remove) > 0:\n for term in terms_to_remove:\n dct.filter_tokens(bad_ids=[dct.token2id[term]])\n\n dct.filter_extremes(no_below=no_below, no_above=no_above)\n\n gensim_corpus = [dct.doc2bow(bag_of_word.split()) for bag_of_word in texts]\n print ('gensim corpus done')\n if scoring:\n\n coherence_values = []\n\n for n in range(start, num_topics, step):\n\n lda = LdaMallet(constants.PATH_TO_MALLET,\n gensim_corpus, id2word=dct,\n num_topics=n)\n coherencemodel = CoherenceModel(model=lda,\n texts=preprocessed_corpus,\n dictionary=dct, coherence='c_v')\n coherence_values.append(coherencemodel.get_coherence())\n\n return coherence_values\n\n else:\n lda = LdaMallet(constants.PATH_TO_MALLET, gensim_corpus,\n id2word=dct, num_topics=num_topics)\n # Visualize LDA results, poor results obtained.\n # from gensim.models.wrappers import ldamallet\n # lda_model = ldamallet.malletmodel2ldamodel(lda)\n # vis = pyLDAvis.gensim.prepare(lda_model, gensim_corpus, dct)\n # pyLDAvis.save_html(vis , 'test.html')\n return {'model': lda, 'corpus': gensim_corpus}",
"def optimize(self):\n scores = []\n n_topics = np.arange(self.topic_range[0], self.topic_range[1]+1)\n print('Running optimization with topic range from {0} to {1}'.format(\n self.topic_range[0],self.topic_range[1]))\n self._preproc()\n\n # Perform LDA for topic_range\n for n in n_topics:\n self.n_topics = n\n self._lda()\n if self.verbose:\n print('LDA completed for {0} topics.'.format(n))\n self._evaluate()\n scores.append(self.score)\n \n # Visualize results\n print('Optimization completed, plotting results...')\n fig1, ax1 = plt.subplots()\n ax1.plot(n_topics, np.asarray(scores))\n ax1.set_title('Coherence for topic range from {0} to {1}'.format(\n self.topic_range[0], self.topic_range[1]), fontsize= 16)\n ax1.set_xlabel('n_topics')\n ax1.set_ylabel('score')\n ax1.set_xticks(n_topics)\n plt.show()",
"def __init__(self, corpus=None, num_topics=100, id2word=None,\n distributed=False, chunksize=2000, passes=1, update_every=1,\n alpha='symmetric', eta=None, decay=0.5, offset=1.0,\n eval_every=10, iterations=50, gamma_threshold=0.001,\n minimum_probability=0.01, random_state=None, ns_conf={},\n minimum_phi_value=0.01, per_word_topics=False):\n\n # store user-supplied parameters\n self.id2word = id2word\n if corpus is None and self.id2word is None:\n raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality')\n\n if self.id2word is None:\n logger.warning(\"no word id mapping provided; initializing from corpus, assuming identity\")\n self.id2word = utils.dict_from_corpus(corpus)\n self.num_terms = len(self.id2word)\n elif len(self.id2word) > 0:\n self.num_terms = 1 + max(self.id2word.keys())\n else:\n self.num_terms = 0\n\n if self.num_terms == 0:\n raise ValueError(\"cannot compute LDA over an empty collection (no terms)\")\n\n self.distributed = bool(distributed)\n self.num_topics = int(num_topics)\n self.chunksize = chunksize\n self.decay = decay\n self.offset = offset\n self.minimum_probability = minimum_probability\n self.num_updates = 0\n\n self.passes = passes\n self.update_every = update_every\n self.eval_every = eval_every\n self.minimum_phi_value = minimum_phi_value\n self.per_word_topics = per_word_topics\n\n self.alpha, self.optimize_alpha = self.init_dir_prior(alpha, 'alpha')\n\n assert self.alpha.shape == (self.num_topics,), \"Invalid alpha shape. Got shape %s, but expected (%d, )\" % (str(self.alpha.shape), self.num_topics)\n\n if isinstance(eta, six.string_types):\n if eta == 'asymmetric':\n raise ValueError(\"The 'asymmetric' option cannot be used for eta\")\n\n self.eta, self.optimize_eta = self.init_dir_prior(eta, 'eta')\n\n self.random_state = utils.get_random_state(random_state)\n\n assert (self.eta.shape == (self.num_terms,) or self.eta.shape == (self.num_topics, self.num_terms)), (\n \"Invalid eta shape. Got shape %s, but expected (%d, 1) or (%d, %d)\" %\n (str(self.eta.shape), self.num_terms, self.num_topics, self.num_terms))\n\n # VB constants\n self.iterations = iterations\n self.gamma_threshold = gamma_threshold\n\n # set up distributed environment if necessary\n if not distributed:\n logger.info(\"using serial LDA version on this node\")\n self.dispatcher = None\n self.numworkers = 1\n else:\n if self.optimize_alpha:\n raise NotImplementedError(\"auto-optimizing alpha not implemented in distributed LDA\")\n # set up distributed version\n try:\n import Pyro4\n with utils.getNS(**ns_conf) as ns:\n from gensim.models.lda_dispatcher import LDA_DISPATCHER_PREFIX\n self.dispatcher = Pyro4.Proxy(ns.list(prefix=LDA_DISPATCHER_PREFIX)[LDA_DISPATCHER_PREFIX])\n logger.debug(\"looking for dispatcher at %s\" % str(self.dispatcher._pyroUri))\n self.dispatcher.initialize(id2word=self.id2word, num_topics=self.num_topics,\n chunksize=chunksize, alpha=alpha, eta=eta, distributed=False)\n self.numworkers = len(self.dispatcher.getworkers())\n logger.info(\"using distributed version with %i workers\" % self.numworkers)\n except Exception as err:\n logger.error(\"failed to initialize distributed LDA (%s)\", err)\n raise RuntimeError(\"failed to initialize distributed LDA (%s)\" % err)\n\n # Initialize the variational distribution q(beta|lambda)\n self.state = LdaState(self.eta, (self.num_topics, self.num_terms))\n self.state.sstats = self.random_state.gamma(100., 1. 
/ 100., (self.num_topics, self.num_terms))\n self.expElogbeta = np.exp(dirichlet_expectation(self.state.sstats))\n\n # if a training corpus was provided, start estimating the model right away\n if corpus is not None:\n use_numpy = self.dispatcher is not None\n self.update(corpus, chunks_as_numpy=use_numpy)",
"def learn_topic_model_activities(self):\n print \"\\nLearning a topic model with LDA:\"\n\n doc_topic, topic_word = tm.run_topic_model(self.accu_path, self.config['lda'])\n\n tm.dump_lda_output(self.lda_path, doc_topic, topic_word)\n print \"Topic Modelling - done.\\n\"\n return True",
"def initialize_corpus() -> None:\n\n # set globals\n global ciesek_lemma, drosten_lemma, ciesek_token, drosten_token\n\n # Step 1: Load data from csv-file. Goal: list of questions, separated by asked scientist\n drosten_questions = get_question_list('CD')\n ciesek_questions = get_question_list('SC')\n\n # Step 2: Remove stopwords.\n drosten_questions = remove_stopwords(drosten_questions)\n ciesek_questions = remove_stopwords(ciesek_questions)\n\n print('Number of questions to Drosten: ', len(drosten_questions))\n print('Number of questions to Ciesek: ', len(ciesek_questions))\n\n # Step 3: Tokenize and lemmatize list with questions.\n ciesek_token = tokenize(ciesek_questions)\n ciesek_lemma = lemmatizer(ciesek_questions)\n drosten_token = tokenize(drosten_questions)\n drosten_lemma = lemmatizer(drosten_questions)\n\n# ----- not necessary part for lda -------\n # follow description on:\n # https://radimrehurek.com/gensim/auto_examples/tutorials/run_lda.html#sphx-glr-auto-examples-tutorials-run-lda-py\n\n # Step 4: Compute bigrams and trigrams.\n ciesek_bigrams = gen_ngrams(ciesek_lemma, 2)\n ciesek_trigrams = gen_ngrams(ciesek_lemma, 3)\n drosten_bigrams = gen_ngrams(drosten_lemma, 2)\n drosten_trigrams = gen_ngrams(drosten_lemma, 3)\n\n # add bigrams and trigrams to list\n c_words = merge_lists(ciesek_lemma, ciesek_bigrams, ciesek_trigrams)\n d_words = merge_lists(drosten_lemma, drosten_bigrams, drosten_trigrams)",
"def investigate_topics(model, loaded_data, labels, videos, prob_of_words, language_indices, _lambda, n_top_words = 30):\n\n topic_word = model.topic_word_\n doc_topic = model.doc_topic_\n code_book, graphlets_, uuids, miss_labels = loaded_data\n print \"1\"\n import pdb; pdb.set_trace()\n\n true_labels = labels\n vocab = [hash for hash in list(code_book)]\n graphs = loaded_data[1]\n # ****************************************************************************************************\n # Relevance\n # ****************************************************************************************************\n names_list = [i.lower() for i in ['Alan','Alex','Andy','Amy','Michael','Ben','Bruno','Chris','Colin','Collin','Ellie','Daniel','Dave','Eris','Emma','Helen','Holly','Jay','the_cleaner','Jo','Luke','Mark','Louis','Laura', 'Kat','Matt','Nick','Lucy','Rebecca','Jennifer','Ollie','Rob','Ryan','Rachel','Sarah','Stefan','Susan']]\n\n relevant_words = {}\n for i, phi_kw in enumerate(topic_word):\n\n phi_kw = threshold(np.asarray(phi_kw), 0.00001)\n log_ttd = [_lambda*math.log(y) if y!=0 else 0 for y in phi_kw]\n log_lift = [(1-_lambda)*math.log(y) if y!=0 else 0 for y in phi_kw / probability_of_words]\n relevance = np.add(log_ttd, log_lift)\n\n # cnt = 0\n # import pdb; pdb.set_trace()\n # for h, g in zip(np.asarray(vocab)[relevance >2.1], graphs[relevance >2.1]):\n # o, s, t = object_nodes(g)\n # if \"hand\" in o and \"object_14\" in o and len(s) == 2:\n # print h, s, t\n # cnt+=1\n # print cnt\n # genome_rel(relevance, i)\n\n inds = np.argsort(relevance)[::-1]\n # top_relevant_words_in_topic = np.array(vocab)[inds] #[:-(n_top_words+1):-1]\n # pdb.set_trace()\n relevant_language_words_in_topic = []\n\n for ind in inds:\n word = vocab[ind]\n\n #todo: somehting is wrong here.\n if relevance[ind] <= 1.0 and word.isalpha() and word not in names_list:\n relevant_language_words_in_topic.append(word)\n # pdb.set_trace()\n relevant_words[i] = relevant_language_words_in_topic[:10]\n\n # print(\"\\ntype(topic_word): {}\".format(type(topic_word)))\n # print(\"shape: {}\".format(topic_word.shape))\n print \"objects in each topic: \"\n topics = {}\n for i, topic_dist in enumerate(topic_word):\n objs = []\n top_words_in_topic = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]\n\n #print('Topic {}: {}'.format(i, ' '.join( [repr(i) for i in top_words_in_topic] )))\n # for j in [graphlets[k] for k in top_words_in_topic]:\n # objs.extend(object_nodes(j)[0])\n topics[i] = objs\n print('Topic {}: {}'.format(i, list(set(objs))))\n print top_words_in_topic\n\n # #Each document's most probable topic\n restricted_labels, restricted_videos = [], []\n pred_labels = []\n\n for n in xrange(doc_topic.shape[0]):\n #print [p for p in doc_topic[n] if p >= 0.0] # each document probabilities to each topic\n if max(doc_topic[n]) > class_thresh:\n # print true_labels[n]\n # print doc_topic[n]\n # print doc_topic[n].argmax()\n # doc_topic[n][doc_topic[n].argmax()] = 0\n restricted_labels.append(true_labels[n])\n restricted_videos.append(videos[n])\n topic_most_pr = doc_topic[n].argmax()\n pred_labels.append(topic_most_pr)\n\n #if dbg: print(\"doc: {} topic: {}\".format(n, topic_most_pr))\n true_labels = restricted_labels\n videos = restricted_videos\n print \"2\"\n import pdb; pdb.set_trace()\n\n return true_labels, pred_labels, videos, relevant_words",
"def evaluate_lda(model, dictionary, corpus, texts, calculate_coherence=True, use_multicore=False):\n # perplexity = model.log_perplexity(corpus)\n coherence_lda = None\n if calculate_coherence:\n coherence_model_lda = CoherenceModel(model=model, texts=texts, dictionary=dictionary,\n coherence='c_v', processes=N_WORKERS if use_multicore else 1)\n coherence_lda = coherence_model_lda.get_coherence()\n return 0, coherence_lda",
"def build_model_gensim(corpus, id2word, num_topics=20, validset=None):\n\n # Build LDA model\n lda_model = gensim.models.ldamulticore.LdaMulticore(corpus=corpus,\n id2word=id2word,\n num_topics=num_topics,\n random_state=100,\n eval_every=5,\n chunksize=10000, #nb of docs in each training chunk\n passes=50,\n iterations=500,\n alpha=0.001,\n per_word_topics=True,\n workers=4,)\n\n print(\"eta\",lda_model.eta)\n print(\"alpha\",lda_model.alpha)\n\n if validset:\n valid_corpus, valid_id2word, valid_data_lemmatized = validset\n print(lda_model.log_perplexity(valid_corpus, len(valid_corpus)))\n\n return lda_model",
"def investigate_topics(model, code_book, labels, videos, prob_of_words, _lambda, n_top_words = 30):\n\n topic_word = model.topic_word_\n doc_topic = model.doc_topic_\n # code_book, graphlets, uuids, miss_labels = loaded_data\n # print \"1\"\n # import pdb; pdb.set_trace()\n\n true_labels = labels\n vocab = [hash for hash in list(code_book)]\n\n # ****************************************************************************************************\n # Relevance\n # ****************************************************************************************************\n # names_list = [i.lower() for i in ['Alan','Alex','Andy','Amy','Michael','Ben','Bruno','Chris','Colin','Collin','Ellie','Daniel','Dave','Eris','Emma','Helen','Holly','Jay','the_cleaner',\n # 'Jo','Luke','Mark','Louis','Laura', 'Kat','Matt','Nick','Lucy','Rebecca','Jennifer','Ollie','Rob','Ryan','Rachel','Sarah','Stefan','Susan']]\n\n relevant_words = {}\n for i, phi_kw in enumerate(topic_word):\n\n phi_kw = threshold(np.asarray(phi_kw), 0.00001)\n log_ttd = [_lambda*math.log(y) if y!=0 else 0 for y in phi_kw]\n log_lift = [(1-_lambda)*math.log(y) if y!=0 else 0 for y in phi_kw / prob_of_words]\n relevance = np.add(log_ttd, log_lift)\n\n # cnt = 0\n # import pdb; pdb.set_trace()\n # for h, g in zip(np.asarray(vocab)[relevance >2.1], graphs[relevance >2.1]):\n # o, s, t = object_nodes(g)\n # if \"hand\" in o and \"object_14\" in o and len(s) == 2:\n # print h, s, t\n # cnt+=1\n # print cnt\n # vis.genome_rel(relevance, i)\n\n inds = np.argsort(relevance)[::-1]\n # top_relevant_words_in_topic = np.array(vocab)[inds] #[:-(n_top_words+1):-1]\n # pdb.set_trace()\n relevant_language_words_in_topic = []\n\n for ind in inds:\n word = vocab[ind]\n\n #todo: somehting is wrong here.\n if relevance[ind] <= 1.0 and word.isalpha() and word not in names_list:\n relevant_language_words_in_topic.append(word)\n # pdb.set_trace()\n relevant_words[i] = relevant_language_words_in_topic[:10]\n\n # print(\"\\ntype(topic_word): {}\".format(type(topic_word)))\n # print(\"shape: {}\".format(topic_word.shape))\n # print \"objects in each topic: \"\n topics = {}\n for i, topic_dist in enumerate(topic_word):\n objs = []\n top_words_in_topic = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]\n\n #print('Topic {}: {}'.format(i, ' '.join( [repr(i) for i in top_words_in_topic] )))\n # for j in [graphlets[k] for k in top_words_in_topic]:\n # objs.extend(object_nodes(j)[0])\n topics[i] = objs\n # print('Topic {}: {}'.format(i, list(set(objs))))\n # print top_words_in_topic\n\n # #Each document's most probable topic\n restricted_labels, restricted_videos = [], []\n pred_labels = []\n\n for n in xrange(doc_topic.shape[0]):\n #print [p for p in doc_topic[n] if p >= 0.0] # each document probabilities to each topic\n if max(doc_topic[n]) > class_thresh:\n # print true_labels[n]\n # print doc_topic[n]\n # print doc_topic[n].argmax()\n # doc_topic[n][doc_topic[n].argmax()] = 0\n restricted_labels.append(true_labels[n])\n restricted_videos.append(videos[n])\n topic_most_pr = doc_topic[n].argmax()\n pred_labels.append(topic_most_pr)\n\n #if dbg: print(\"doc: {} topic: {}\".format(n, topic_most_pr))\n true_labels = restricted_labels\n videos = restricted_videos\n # print \"2\"\n # import pdb; pdb.set_trace()\n\n return true_labels, pred_labels, videos, relevant_words",
"def lda_description(review_text, min_topic_freq=0.05,topic_model_file='lda_model_10'):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n \n # parse the review text with spaCy\n parsed_review = nlp(review_text)\n \n # lemmatize the text and remove punctuation and whitespace\n unigram_review = [token.lemma_ for token in parsed_review\n if not punct_space(token)]\n \n # apply the first-order and secord-order phrase models\n bigram_review = bigram_model[unigram_review]\n trigram_review = trigram_model[bigram_review]\n \n # remove any remaining stopwords\n trigram_review = [term for term in trigram_review\n if not term in spacy.lang.en.STOP_WORDS]\n #print('bow:',trigram_review)\n \n # create a bag-of-words representation\n review_bow = sents_dict.doc2bow(trigram_review)\n \n \n # create an LDA representation\n lda = LdaMulticore.load(joinp(pilot_path, topic_model_file)) # my addition\n review_lda = lda[review_bow]\n \n \n # mine\n if topic_model_file=='lda_model_25':\n topic_names=topic_names_25\n elif topic_model_file=='lda_model_10':\n topic_names=topic_names_10\n #\n \n # sort with the most highly related topics first\n #review_lda = sorted(review_lda, key=lambda topic_number,freq: freq)\n listt=[]\n for topic_number, freq in review_lda:\n if freq < min_topic_freq:\n break\n \n # print the most highly related topic names and frequencies\n #print('{:10} {}'.format(topic_names[topic_number],round(freq, 3))) ## for now not putting yet topic names\n #print('{:25} {}'.format(topic_number,round(freq, 3))) \n x=[topic_number,topic_names[topic_number],np.round(freq, 3)]\n listt.append(x)\n return(listt)",
"def create_topic_model(data_train, project_key, data_test, labels_train, labels_test,path):\n results = pd.DataFrame(columns=['project_key', 'usability_label', 'num_topics', 'feature_importance', 'accuracy_rf',\n 'confusion_matrix_rf', 'classification_report_rf', 'area_under_pre_recall_curve_rf',\n 'avg_precision_rf', 'area_under_roc_curve_rf', 'y_pred_rf', 'precision_rf',\n 'recall_rf', 'thresholds_rf', 'y_test', 'features'])\n # select by result prediction:\n path = ''\n num_topic_list = [2, 3, 4, 5, 6, 7, 8, 9, 10]\n words = 10\n # train\n text_train_list = []\n for row in data_train['clean_text_new']:\n text_train_list.append(row)\n # Creating the term dictionary of our corpus, where every unique term is assigned an index.\n dictionary = corpora.Dictionary(text_train_list)\n # Converting list of documents (corpus) into Document Term Matrix using dictionary prepared above.\n corpus = [dictionary.doc2bow(doc) for doc in text_train_list]\n # test\n text_test_list = []\n for row in data_test['clean_text_new']:\n text_test_list.append(row)\n corpus_test = [dictionary.doc2bow(doc) for doc in text_test_list]\n for num in num_topic_list:\n lda_model = create_gensim_lda_model(text_train_list, num, words, dictionary, corpus)\n df_topic_sents_keywords = format_topics_sentences(lda_model, corpus, text_train_list)\n # Format\n df_dominant_topic = df_topic_sents_keywords.reset_index()\n df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords', 'Text']\n df_topic_sents_keywords_test = format_topics_sentences(lda_model, corpus_test, text_test_list)\n # Format\n df_dominant_topic_test = df_topic_sents_keywords_test.reset_index()\n df_dominant_topic_test.columns = ['Document_No', 'Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords',\n 'Text']\n df_dominant_topic = df_dominant_topic.reset_index(drop=True)\n df_dominant_topic_test = df_dominant_topic_test.reset_index(drop=True)\n\n # results only dominant topic\n\n x_train = pd.DataFrame(df_dominant_topic['Dominant_Topic'])\n x_test = pd.DataFrame(df_dominant_topic_test['Dominant_Topic'])\n # run results:\n accuracy_rf, confusion_matrix_rf, classification_report_rf, area_under_pre_recall_curve_rf, avg_pre_rf, \\\n avg_auc_rf, y_pred_rf, feature_importance, precision_rf, recall_rf, \\\n thresholds_rf = run_random_forest(x_train, x_test, labels_train['usability_label'],\n labels_test['usability_label'])\n\n df = {'project_key': project_key, 'usability_label': 'is_change_text_num_words_5', 'num_topics': num,\n 'feature_importance': feature_importance, 'accuracy_rf': accuracy_rf,\n 'confusion_matrix_rf': confusion_matrix_rf, 'classification_report_rf': classification_report_rf,\n 'area_under_pre_recall_curve_rf': area_under_pre_recall_curve_rf, 'avg_precision_rf': avg_pre_rf,\n 'area_under_roc_curve_rf': avg_auc_rf, 'y_pred_rf': y_pred_rf,\n 'precision_rf': precision_rf, 'recall_rf': recall_rf, 'thresholds_rf': thresholds_rf,\n 'y_test': labels_test['usability_label'], 'features': 'only topic model one hot'}\n\n results = results.append(df, ignore_index=True)\n\n # results dominant topic dummies\n\n x_train = pd.get_dummies(x_train, columns=['Dominant_Topic'], drop_first=True)\n x_test = pd.get_dummies(x_test, columns=['Dominant_Topic'], drop_first=True)\n # Get missing columns in the training test\n missing_cols = set(x_train.columns) - set(x_test.columns)\n # Add a missing column in test set with default value equal to 0\n for c in missing_cols:\n x_test[c] = 0\n # Ensure the order of column in the test set is 
in the same order than in train set\n x_test = x_test[x_train.columns]\n\n # run results:\n accuracy_rf, confusion_matrix_rf, classification_report_rf, area_under_pre_recall_curve_rf, avg_pre_rf, \\\n avg_auc_rf, y_pred_rf, feature_importance, precision_rf, recall_rf, \\\n thresholds_rf = run_random_forest(x_train, x_test, labels_train['usability_label'],\n labels_test['usability_label'])\n\n d = {'project_key': project_key, 'usability_label': 'is_change_text_num_words_5', 'num_topics': num,\n 'feature_importance': feature_importance, 'accuracy_rf': accuracy_rf,\n 'confusion_matrix_rf': confusion_matrix_rf, 'classification_report_rf': classification_report_rf,\n 'area_under_pre_recall_curve_rf': area_under_pre_recall_curve_rf, 'avg_precision_rf': avg_pre_rf,\n 'area_under_roc_curve_rf': avg_auc_rf, 'y_pred_rf': y_pred_rf,\n 'precision_rf': precision_rf, 'recall_rf': recall_rf, 'thresholds_rf': thresholds_rf,\n 'y_test': labels_test['usability_label'], 'features': 'only topic model dummy'}\n\n results = results.append(d, ignore_index=True)\n\n results.to_csv(\n '{}/topic_model/results_{}_label_is_change_text_num_words_5.csv'.format(\n path,project_key),\n index=False)",
"def run_lda(args, corpus, pre, dictionary=None, workers=None, docs=None, num_files=None):\n MALLET_PATH = os.environ.get(\"MALLET_PATH\", \"lda-tools/ext/mallet/bin/mallet\")\n if args.gensim:\n lda = gensim.models.wrappers.LdaMallet\n model = lda(MALLET_PATH, corpus, num_topics=args.num_topics,\n id2word=dictionary, optimize_interval=args.optimize_interval,\n workers=workers, iterations=args.num_iterations,\n prefix=pre)\n else:\n rand_prefix = hex(random.randint(0, 0xffffff))[2:] + '-'\n prefix = os.path.join(tempfile.gettempdir(), rand_prefix)\n mallet_corpus = prefix + 'corpus'\n\n print('Generating topic model.')\n form = 'tsv' if args.tsv_corpus else \"text\"\n tsv_corpus = None\n if not args.tsv_corpus:\n os.makedirs(mallet_corpus)\n corpus.export(mallet_corpus, abstract=False, form=form)\n elif args.year_split != -1:\n year, lines = docs\n os.makedirs(mallet_corpus)\n tsv_corpus = os.path.join(mallet_corpus, str(year) + \"-tmp.tsv\")\n with open(tsv_corpus, 'w') as f:\n f.write(\"\\n\".join(lines))\n else:\n tsv_corpus = args.tsv_corpus\n\n mallet_corpus = None if args.tsv_corpus else mallet_corpus\n model = Mallet(MALLET_PATH, mallet_corpus, num_topics=args.num_topics,\n iters=args.num_iterations, bigrams=args.bigrams_only,\n topical_n_grams=args.topical_n_grams,\n remove_stopwords=(not args.topical_n_grams), prefix=pre,\n print_output=True, file=tsv_corpus, min_df=args.min_df,\n max_df=args.max_df, num_files=num_files)\n return model",
"def build_model_lda(self, file_model_lda, num_of_topics=100, num_pass=4, \n num_iter=100, use_stored_model=True):\n \n # Check if model already exists and should be loaded\n if os.path.isfile(file_model_lda) and use_stored_model: \n print(\"Load stored LDA model ...\")\n self.model_lda = gensim.models.LdaModel.load(file_model_lda)\n else:\n if use_stored_model:\n print(\"Stored LDA model not found!\")\n print(\"Calculating new LDA model...\")\n self.model_lda = gensim.models.LdaModel(self.bow_corpus, id2word=self.dictionary, \n num_topics=num_of_topics, passes=num_pass, iterations=num_iter) \n \n # Save model\n self.model_lda.save(file_model_lda)\n \n # Output the Keyword in the 10 topics\n pprint(\"Keyword in the 10 topics\")\n pprint(self.model_lda.print_topics())",
"def _calculate_perplexity(docs: List[List[int]], theta: np.array, phi: np.array) -> float:\n # 计算在 test_docs 里面所有词出现的概率,对主题 z 求积分\n sum_n_d = 0\n sum_log_p = 0.0\n\n probability_doc_topic = theta\n probability_topic_term = phi\n\n for m, doc in enumerate(docs):\n for n, word in enumerate(doc):\n sum_log_p = sum_log_p + \\\n np.log(\n np.dot(probability_topic_term[:, word], probability_doc_topic[m, :]))\n sum_n_d = sum_n_d + 1\n return np.exp(sum_log_p/(-sum_n_d))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Workaround manage.py migrate complications run syncdb in case it's our first run, so we make sure south_migrationhistory table is created run migrate to apply latest migrations run syncdb again to populate contrib.auth.models | def smart_syncdb_migrate(self):
local('python manage.py syncdb')
local('python manage.py migrate')
local('python manage.py syncdb --all') | [
"def migrate():\n puts(yellow(\"Run South migrations\"))\n django_manage('migrate')",
"def post_migrations(self):",
"def migrate():\n run(\"./manage.py migrate\")",
"def migrate(self):\n\tpass",
"def migrate_database(self):\n\n self.db.migrate_database()",
"def syncdb():\n command(\"syncdb\", \"migrate\")",
"def model_post_migrate(*args, **kwargs):\n global IN_MIGRATIONS\n IN_MIGRATIONS = False",
"def apply_migrations():\n config = Config(ALEMBIC_CONFIG)\n upgrade(config, 'head')",
"def southify(app):\n managepy('migrate %s 0001 --fake' % app)\n managepy('migrate %s' % app)",
"def setup_before_migration(self, apps):",
"def migration():",
"def migrate() -> None:\n typer.echo('Starting migration...')\n subprocess.run(['alembic', 'upgrade', 'head'])",
"def model_pre_migrate(*args, **kwargs):\n global IN_MIGRATIONS\n IN_MIGRATIONS = True",
"def migrate_new_apps():\n new_apps = run('%s %s/fabfiles/django_scripts/get_apps_without_migration.py'\n % (env.PYTHON_BIN, env.SRC_PATH))\n # The script denotes the start of its output by \"{% output %}\" tag so we\n # only take whatever's after that\n new_apps = new_apps.split('{% output %}')[1].split()\n with cd(env.SRC_PATH):\n for app in new_apps:\n sudo(\"%s manage.py schemamigration %s --initial\" %\n (env.PYTHON_BIN, app.strip()))\n sudo(\"%s manage.py migrate %s --no-initial-data\" %\n (env.PYTHON_BIN, app.strip()))",
"def migrate_instance():\n logger.debug(\"Migration not yet supported.\")",
"def test_v1_migrate(self):\n pass",
"def db_migrate():\n when = str(int(time.time()))\n sql_file = os.path.join(MIGRATION_FOLDER, f\"{when}.sql\")\n\n with open(sql_file, 'w') as save_sql:\n up = MYSQL_UP.format(f\"upgrade-{when}\", when, MIGRATION_TABLE)\n down = MYSQL_DOWN.format(f\"downgrade-{when}\", when, MIGRATION_TABLE)\n\n save_sql.write(\"\\n\\n\".join([up, down]))\n LOGGER.info(f\"migration file: {os.path.join('migrations', sql_file)}\")",
"def test_v1alpha3_migrate(self):\n pass",
"def setup_database():\n from django.core.management import call_command\n from django import setup\n setup()\n call_command('migrate', verbosity=0, interactive=False)\n call_command('loaddata', data('initial_data.json'), verbosity=0, interactive=False)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ssum([1,2,3]) 6 ssum([2,3]) 5 ssum([3]) 3 ssum([]) 0 | def ssum(L: list) -> int:
return 0 if not L else L[0]+ssum(L[1:]) | [
"def total(ls):\n if ls == []:\n return 0\n return ls[0] + total(ls[1:])",
"def zero_sum(list):\n if not list:\n return 0\n else:\n return sum(list)",
"def sum(numbers):",
"def mysum(items) :",
"def example1(S):\n n = len(S)\n total = 0\n for j in range(n): # loop from 0 to n-1\n total += S[j]\n return total",
"def sum_unique(l):\n pass",
"def zsum(s, *args, **kwargs):\n return 0 if s.empty else s.sum(*args, **kwargs)",
"def my_sum(items):\n if len(items) == 0:\n return 0\n return items[0] + my_sum(items[1:])",
"def example2(S):\n n = len(S)\n total = 0\n for j in range(0, n, 2): # note the increment of 2\n total += S[j]\n return total",
"def get_sum(numbers: List[int]) -> int:\n pass",
"def sum3(nums):\n return sum(nums)",
"def lucas(n):\n lucval = sum_series(n, 2, 1)\n print(lucval)\n return lucval",
"def sum_of_squares(items) :\n return sum([item ** 2 for item in items])",
"def multiplication_total_of(num_list):",
"def lsummult (list1,list2):\r\n if len(list1) <> len(list2):\r\n raise ValueError, \"Lists not equal length in summult.\"\r\n s = 0\r\n for item1,item2 in pstats.abut(list1,list2):\r\n s = s + item1*item2\r\n return s",
"def sum_list(nums):\n\n if nums == []:\n return 0\n\n return nums[0] + sum_list(nums[1:])",
"def ll_sum(t):\n result = 0\n for num in t:\n result += sum(num)\n return result",
"def mysum(items) :\n total = 0\n for item in items :\n total += item\n return total",
"def sum_of_squares(xs):\n total = 0\n\n for x in xs:\n total = total + (x * x)\n\n return total"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
hello hell hel he h ниже вызовы стека для закоментированной строки иначе елка перевернется pars_str1(hello) '\nh\nhe\nhel\nhell\nhello' pars_str1(hell) '\nh\nhe\nhel\nhell' pars_str1(hel) '\nh\nhe\nhel' pars_str1(he) '\nh\nhe' pars_str1(h) '\nh' pars_str1() '' для незакоментированной строки елка развернулась pars_str1(hello) 'hello\nhell\nhel\nhe\nh\n' pars_str1(hell) 'hell\nhel\nhe\nh\n' pars_str1(hel) 'hel\nhe\nh\n' pars_str1(he) 'he\nh\n' pars_str1(h) 'h\n' pars_str1() '' | def pars_str1(stroka: str) -> str:
if stroka:
return f'{stroka}\n{pars_str1(stroka[:-1])}'
# return f'{pars_str1(stroka[:-1])}\n{stroka}''
return '' | [
"def str_pre_regexp(self, anystring, debug_lvl = 0):\n\t\t\n\t\tstrlen = len(anystring)\n\t\t\n\t\t# A) préparation du contenu\n\t\t# --------------------------\n\t\tsubtokens = re_TOUS.findall(anystring)\n\t\t\n\t\t# £TODO now read those params in conf\n\t\tdo_cesure=True\n\t\tdo_espace=True\n\t\tdo_newline=True\n\t\tdo_char_classes=True\n\t\t\n\t\t\n\t\tif do_espace and do_newline:\n\t\t # autorise 1 saut de ligne, 2 espaces et 1x toutes poncts\n\t\t # (ex: ',' entre nom et prénom) -------------\n\t\t r_INTER_WORD = '[¤ \\W]{0,4}'\n\t\telif do_espace:\n\t\t r_INTER_WORD = '[ \\W]{0,3}'\n\t\telif do_newline:\n\t\t r_INTER_WORD = '[¤\\W]{0,2}'\n\t\telse:\n\t\t r_INTER_WORD = '[\\W]?'\n\t\t\n\t\t# autorise césure, saut de ligne, espace\n\t\tr_INTER_CHAR = '[-¤ ]{0,3}'\n\t\t\n\t\t# on ne fait pas la césure pour les locutions courtes\n\t\tif (not do_cesure or strlen < 6):\n\t\t\t# re: chaîne de base \n\t\t\t# ------------------\n\t\t\t# autorisant un ou des passage-s à la ligne à chaque limite \n\t\t\t# limites selon re_FINDALL = (\\b et/ou bords de ch. ponct)\n\t\t\tmy_re_str = r_INTER_WORD.join(r\"%s\" % re.escape(u) for u in subtokens)\n\t\t\n\t\t# expression + sioux pour permettre césure inattendue et erreurs OCR\n\t\telse:\n\t\t\tminlen = strlen\n\t\t\t# on permet 3 caractères en plus tous les 80 caractères\n\t\t\tmaxlen = strlen + ((strlen // 80)+1) * 3\n\t\t\t\n\t\t\t# lookahead sur /./ ==> exprime la contrainte de longueur de la regex qui suivra\n\t\t\tre_length_prefix = r\"(?=.{%i,%i})\" % (minlen, maxlen)\n\t\t\t\n\t\t\tinterpolated_tokens = []\n\t\t\t\n\t\t\t# interpolations dans chaque token...\n\t\t\tfor u in subtokens:\n\t\t\t\tinterpold_word=\"\"\n\t\t\t\tarray_c_re = []\n\t\t\t\t\n\t\t\t\t# ... donc pour chaque **caractère**\n\t\t\t\t# =========================\n\t\t\t\tfor c in u:\n\t\t\t\t\t# each character regexp\n\t\t\t\t\tc_re = \"\"\n\t\t\t\t\t\n\t\t\t\t\t# cas simple sans traitement OCR\n\t\t\t\t\t# ----------\n\t\t\t\t\tif not do_char_classes or (c not in XTokinfo.OCR_CLASSES):\n\t\t\t\t\t\t# esc !\n\t\t\t\t\t\tc_re = re.escape(c)\n\t\t\t\t\t\t\n\t\t\t\t\t\t# store\n\t\t\t\t\t\tarray_c_re.append(c_re)\n\t\t\t\t\t\n\t\t\t\t\t# cas avec OCR: sub/caractère/groupe de caractères 'semblables'/g\n\t\t\t\t\t# -------------\n\t\t\t\t\telse:\n\t\t\t\t\t\tc_matchables = XTokinfo.OCR_SIMILAR_CHARACTER[c]\n\t\t\t\t\t\t# esc + joined alternatives\n\t\t\t\t\t\tc_alter = '|'.join(map(re.escape,c_matchables))\n\t\t\t\t\t\t\n\t\t\t\t\t\t# ex : regexp = '(?:i|l)'\n\t\t\t\t\t\tc_re += '(?:' + c_alter + ')'\n\t\t\t\t\t\t\n\t\t\t\t\t\t# store\n\t\t\t\t\t\tarray_c_re.append(c_re)\n\t\t\t\t\t\n\t\t\t\t\t# dans les 2 cas: césure\n\t\t\t\t\t# ----------------------\n\t\t\t\t\t# on va ajouter /-?/ entre ch. 
\"regexp caractère\" (ou re_INTER_CHAR)\n\t\t\t\t\tinterpold_word = r_INTER_CHAR.join(array_c_re)\n\t\t\t\t\n\t\t\t\tinterpolated_tokens.append(interpold_word)\n\t\t\t\t\n\t\t\t\tmy_re_str = re_length_prefix + r_INTER_WORD.join(r\"%s\" % u \n\t\t\t\t for u in interpolated_tokens)\n\t\t\t\t\n\t\t\t\t# exemple\n\t\t\t\t# ====x_str==== Oxidation of Metals\n\t\t\t\t# ====re_str==== (?=.{19,22})(?:O|0))[-¤ ]{0,3}x[-¤ ]{0,3}(?:i|\\;|l)[-¤ ]{0,3}(?:d|cl)[-¤ ]{0,3}(?:a|u|n)[-¤ ]{0,3}t[-¤ ]{0,3}(?:i|\\;|l)[-¤ ]{0,3}(?:o|c)[-¤ ]{0,3}n[¤ ]{0,2}(?:o|c)[-¤ ]{0,3}(?:f|t)[¤ ]{0,2}M[-¤ ]{0,3}(?:e|c)[-¤ ]{0,3}t[-¤ ]{0,3}(?:a|u|n)[-¤ ]{0,3}(?:1|l|i|I|\\]|\\/|Z)[-¤ ]{0,3}s\n\t\t\t\n\t\t\n\t\t\n\t\tif debug_lvl >= 2 :\n\t\t\tprint(\"SUBTOKS\", subtokens)\n\t\t\n\t\tif debug_lvl >= 3 :\n\t\t\tprint(\"pre_regexp:\", file=sys.stderr)\n\t\t\tprint(\"\\t=x_str=\", anystring, file=sys.stderr)\n\t\t\tprint(\"\\t=re_str=\", my_re_str, file=sys.stderr)\n\t\t\n\t\t\n\t\t# B) Décision du format des limites gauche et droite pour les \\b\n\t\t# --------------------------------------------------\n\t\t# test si commence par une ponctuation\n\t\tif re.search(r'^\\W', subtokens[0]):\n\t\t\tre_boundary_prefix = \"\"\n\t\telse:\n\t\t\tre_boundary_prefix = \"\\\\b\"\n\t\t# idem à la fin\n\t\tif re.search(r'\\W$', subtokens[-1]):\n\t\t\tre_boundary_postfix = \"\"\n\t\telse:\n\t\t\tre_boundary_postfix = \"\\\\b\"\n\t\t\n\t\t# voilà\n\t\treturn re_boundary_prefix + my_re_str + re_boundary_postfix",
"def test_regexp_chunk_parser():",
"def test_with_no_loose_commas(self):\r\n self.assertEquals(parse_tag_input('one two \"thr,ee\"'), [u'one', u'thr,ee', u'two'])",
"def test_clean_regexp_str_good(self):\n self.__test_clean_regexp_good_h(self.STR, \"foo\")",
"def process_template(template, args, ill_sg_vowel=None):\n parts = []\n delparts = []\n for x in template:\n if x.isdigit():\n k = int(x)\n if k in args:\n v = args[k]\n else:\n v = args.get(x, \"\")\n if v == \"(')\":\n v = \"\"\n if x == \"9\":\n if \"par_sg_a\" in args:\n parts.append(args[\"par_sg_a\"])\n else:\n if not delparts:\n return None\n parts.append(delparts[-1])\n if x == \"3\" and not v:\n # XXX what exactly was this kludge for...? I'm not sure if\n # this is now handled by other means (default value for last\n # argument).\n v = EMPTY_CHAR\n for y in v:\n parts.append(y)\n elif x == \"@\":\n if ill_sg_vowel is not None:\n parts.append(ill_sg_vowel)\n else:\n p = \"\".join(parts + delparts)\n m = re.search(r\"([aeiouyåäöAEIOUYÅÄÖ])\"\n r\"[^aeiouyåäöAEIOUYÅÄÖ]*$\",\n p)\n if m:\n parts.append(m.group(1).lower())\n else:\n m = re.search(r\"[éÉ]\"\n r\"[^aeiouyåäöAEIOUYÅÄÖ]*$\",\n p)\n if m:\n parts.append(\"e\")\n elif p:\n ch = last_char_to_vowel(p[-1])\n parts.append(ch)\n else:\n return None\n elif x == \"A\":\n a = args.get(\"par_sg_a\", None)\n if a:\n parts.append(a)\n else:\n p = \"\".join(parts + delparts)\n parts.append(word_to_aae(p))\n elif x == \"O\":\n p = \"\".join(parts + delparts)\n if needs_aou(p):\n parts.append(\"o\")\n else:\n parts.append(\"ö\")\n elif x == \"U\":\n p = \"\".join(parts + delparts)\n if needs_aou(p):\n parts.append(\"u\")\n else:\n parts.append(\"y\")\n elif x == \"D\":\n p = \"\".join(parts)\n if not p:\n return None\n if p[-1] in \"rnml\":\n parts.append(p[-1])\n else:\n parts.append(\"d\")\n elif x == \"I\":\n # Inserts either previously removed character or \"e\" if it was\n # \"i\".\n if not delparts:\n return None\n if delparts[-1] == \"i\":\n parts.append(\"e\")\n else:\n parts.append(delparts[-1])\n elif x == \"-\":\n # Drop last, move to delparts so it counts for gradation\n if not parts:\n return None\n p = parts.pop()\n if p not in \"aeiouyäöp\": # Must be vowel or p\n return None\n delparts.append(p)\n elif x == \"/\":\n # Drop second to last\n if len(parts) < 2:\n return None\n p = parts.pop()\n if p not in \"aeiouyäö\": # Must be vowel\n return None\n p2 = parts.pop()\n if p2 not in \"aeiouyäö\": # Must be vowel\n return None\n parts.append(p)\n else:\n parts.append(x)\n v = \"\".join(parts)\n if v.find(EMPTY_CHAR) >= 0:\n for ch in \"aeiouyäöAEIOUYÄÖ\":\n v = re.sub(\"([aeiouyäöAEIOUYÄÖ]\" + ch + \")\" + EMPTY_CHAR +\n \"(\" + ch + \")\", r\"\\1'\\2\", v)\n v = re.sub(EMPTY_CHAR, \"\", v)\n return v",
"def e_silaba(chars):\r\n # Funcoes internas (apenas usadas dentro da funcao e_silaba)\r\n def e_silaba2(chars):\r\n \"\"\"\r\n Funcao permite verificar se um dado conjunto de caracteres forma silaba valida de 2 letras\r\n \"\"\"\r\n if (chars[0:2]) in par_vogais: ## Se os dois primeiros caracteres juntos formarem um par_vogal valido\r\n return True\r\n elif chars[0] in consoante and chars[1] in vogal: ## Se os dois primeiros caracteres formarem uma consoante e vogal,respetivamente\r\n return True\r\n elif chars[0] in vogal and chars[1] in consoante_final: ## Se os dois primeiros caracteres formarem uma vogal e consoante_terminal\r\n return True\r\n else:\r\n return False\r\n\r\n def e_silaba3(chars):\r\n \"\"\"\r\n Funcao permite verificar se um dado conjunto de caracteres forma silaba valida de 3 letras\r\n \"\"\"\r\n if (chars[0:3]) in (\"QUA\", \"QUE\", \"QUI\", \"GUE\", \"GUI\"): ## Se os tres primeiros (e unicos) caracteres estiverem no tuplo\r\n return True\r\n elif chars[0] in vogal and chars[1:] == \"NS\":\r\n return True\r\n elif chars[0] in consoante and chars[1:] in par_vogais:\r\n return True\r\n elif chars[0] in consoante and chars[1] in vogal and chars[2] in consoante_final:\r\n return True\r\n elif chars[0:2] in par_vogais and chars[2] in consoante_final:\r\n return True\r\n elif chars[0:2] in par_consoantes and chars[2] in vogal:\r\n return True\r\n else:\r\n return False\r\n\r\n if type(chars) != str:# Verifica se e string\r\n raise ValueError(\"e_silaba:argumento invalido\")\r\n tam = len(chars)\r\n #Dependendo do tamanho da string, averigua se e uma silaba valida\r\n if tam == 1:\r\n if chars[0] in vogal:\r\n return True\r\n else:\r\n return False\r\n elif tam == 2:\r\n return e_silaba2(chars)\r\n elif tam == 3:\r\n return e_silaba3(chars)\r\n elif tam == 4:\r\n return e_silaba4(chars)\r\n elif tam == 5:\r\n return e_silaba5(chars)\r\n else:\r\n return False",
"def parse_from_placeholder(string,pattern,encloser='%',matcher='(.+)'):\n pattern,fields = placeholder_to_regex(pattern,encloser,matcher)\n return parse_from_regex(string,pattern,fields)",
"def test_parse_situées():\n result = parse(SITUEES)\n assert result == (\"%20\"\"chutes\"\"%20\"\"du%20Niagara\")",
"def fallback_parse_accept_language(str, n_langs):\n\n al = string.split(str, ',', n_langs)[:n_langs] # Take only the first n_langs\n al = map(validated_accept_language, al)\n while len(al) < n_langs: al.append('') # Add more if there aren't enough\n return tuple(al)",
"def define_str():\n # 문자열의 정의\n # 한줄 문자열의 정의\n s1 = \"Hello Python\" # 쌍따옴표, 홑따옴표 모두 가능\n s2 = str(\"Hello Python\") # 객체 타입으로 생성\n s3 = str(3.14159) # 다른 타입을 문자열로 캐스팅\n\n print(s1, s2, s3)\n print(type(s1), type(s2), type(s3))\n\n # 인스턴스 체크\n print(\"s1은 str인가?\", isinstance(s1, str))\n\n # 여러 줄 문자열 : \"\"\", '''\n s4 = \"\"\"파이썬의 여러줄 문자열\n 여러 줄 문자열은 다양한 용도로 사용되기도 한다\"\"\"\n print(s4)\n\n # 여러 줄 문자열의 활용\n \"\"\"파이썬은 기본적으로는 여러 줄 주석을 허용하지 않음\n 여러 줄 문자열을 여러줄 주석으로 사용할 수 있다\"\"\"\n\n # 여러 줄 문자열은 함수, 클래스 선언부 아래쪽에\n # 여러 줄 문자열을 입력하면 문서화, 도움말 출력시\n # 이 문자열을 출력한다",
"def test_parse_str_blocks(self):\n path = os.path.join(ARC_PATH, 'arc', 'testing', 'rotor_scans', 'H2O2.out')\n str_blks = parser.parse_str_blocks(\n path, 'Initial Parameters', '--------', regex=False, tail_count=3)\n desire_str_lists = [\n ' ! Initial Parameters !\\n',\n ' ! (Angstroms and Degrees) !\\n',\n ' -------------------------- --------------------------\\n',\n ' ! Name Definition Value Derivative Info. !\\n',\n ' --------------------------------------------------------------------------------\\n',\n ' ! R1 R(1,2) 1.4252 calculate D2E/DX2 analytically !\\n',\n ' ! R2 R(1,3) 0.9628 calculate D2E/DX2 analytically !\\n',\n ' ! R3 R(2,4) 0.9628 calculate D2E/DX2 analytically !\\n',\n ' ! A1 A(2,1,3) 101.2687 calculate D2E/DX2 analytically !\\n',\n ' ! A2 A(1,2,4) 101.2687 calculate D2E/DX2 analytically !\\n',\n ' ! D1 D(3,1,2,4) 118.8736 Scan !\\n',\n ' --------------------------------------------------------------------------------\\n']\n self.assertEqual(len(str_blks), 1)\n self.assertEqual(str_blks[0], desire_str_lists)",
"def str_to_rule(str_in):\r\n log.debug(\"str_to_rule: \"+str_in.strip())\r\n str_i = str_in.strip().split('#')[0].strip()\r\n if len(str_i)>0:\r\n dic_rule = dict(valid=False,type='para',errors=list(),key=\"\",val=\"\")\r\n if(str_i[0]==\"%\"): # % Parameter str_i\r\n lst_par = str_i[1:].split('=')\r\n lst_par = [par.strip() for par in lst_par]\r\n if lst_par[0] in lst_known_para:\r\n dic_rule['key']=lst_par[0].strip()\r\n dic_rule['val']=lst_par[1].strip()\r\n dic_rule['valid']=True\r\n log.info('Parameter recognised: '+str(dic_rule['key'])+' = '+str(dic_rule['val']))\r\n else:\r\n dic_rule['valid']=False\r\n dic_rule['errors'].append(\"Unrecognised parameter: \"+lst_par[0])\r\n log.warning('#205 > '+str(dic_rule['errors'])+' raw line: '+str_i)\r\n elif(str_i[0]==\":\"): # : Rule str_i\r\n dic_rule = dict(valid=False,type='rule',errors=list(),id=\"\",title=\"\",mode=\"\",data_table=\"\",condition=\"\",action=\"\",act_param=\"\",comment=\"\")\r\n lst_items = str_i[1:].split(\":\")\r\n lst_items = [itm.strip() for itm in lst_items]\r\n if len(lst_items)==8:\r\n dic_rule['id']=lst_items[0]\r\n dic_rule['title']=lst_items[1]\r\n dic_rule['mode']=lst_items[2].upper()\r\n dic_rule['layer']=lst_items[3]\r\n dic_rule['condition']=lst_items[4]\r\n dic_rule['action']=lst_items[5]\r\n dic_rule['act_param']=lst_items[6]\r\n dic_rule['comment']=lst_items[7]\r\n dic_rule = sanity_check(dic_rule)\r\n if not dic_rule['valid']:\r\n log.warning('#203 invalid rule > '+str(dic_rule['errors'])+' raw line: '+str_in)\r\n log.debug('parsed good rule: '+str(dic_rule))\r\n else:\r\n dic_rule['errors'].append(\"Rule string does not contain the correct number of elements - Check that you comment do not contain ':'. Ignoring this rule. \\n\\t\"+str_in.strip()+\"\\n\\t\"+str(len(lst_items))+'\\t'+str(lst_items))\r\n log.warning('#202 '+dic_rule['errors'])\r\n dic_rule['valid']=False\r\n else:\r\n dic_rule['errors'].append(\"Rule string must start with #, % or : But I found: \"+str_in[0]+\" in line (\"+str_in+\")\")\r\n log.warning('#201 '+str(dic_rule['errors'][-1:]))\r\n dic_rule['valid']=False\r\n else: # Empty (or only comments) str_i\r\n return {'type':'null', 'valid':True}\r\n return dic_rule",
"def __defaultCaseForParseString(self, rootForm: str, parseString: str, partOfSpeech: str) -> str:\n if parseString == \"P3SG+NOM$PNON+ACC\":\n if partOfSpeech == \"PROP\":\n return \"PNON+ACC\"\n else:\n return \"P3SG+NOM\"\n elif parseString == \"A2SG+P2SG$A3SG+P3SG\":\n return \"A3SG+P3SG\"\n elif parseString == \"A3PL+P3PL+NOM$A3PL+P3SG+NOM$A3PL+PNON+ACC$A3SG+P3PL+NOM\":\n return \"A3PL+P3SG+NOM\"\n elif parseString == \"P2SG$P3SG\":\n return \"P3SG\"\n elif parseString == \"A3PL+PNON+NOM$A3SG+PNON+NOM^DB+VERB+ZERO+PRES+A3PL\":\n return \"A3PL+PNON+NOM\"\n elif parseString == \"P2SG+NOM$PNON+GEN\":\n return \"PNON+GEN\"\n elif parseString == \"AOR^DB+ADJ+ZERO$AOR+A3SG\":\n return \"AOR+A3SG\"\n elif parseString == \"P2SG$PNON\":\n return \"PNON\"\n elif parseString == \"ADV+SINCE$VERB+ZERO+PRES+COP+A3SG\":\n if rootForm == \"yıl\" or rootForm == \"süre\" or rootForm == \"zaman\" or rootForm == \"ay\":\n return \"ADV+SINCE\"\n else:\n return \"VERB+ZERO+PRES+COP+A3SG\"\n elif parseString == \"CONJ$VERB+POS+IMP+A2SG\":\n return \"CONJ\"\n elif parseString == \"NEG+IMP+A2SG$POS^DB+NOUN+INF2+A3SG+PNON+NOM\":\n return \"POS^DB+NOUN+INF2+A3SG+PNON+NOM\"\n elif parseString == \"NEG+OPT+A3SG$POS^DB+NOUN+INF2+A3SG+PNON+DAT\":\n return \"POS^DB+NOUN+INF2+A3SG+PNON+DAT\"\n elif parseString == \"NOUN+A3SG+P3SG+NOM$NOUN^DB+ADJ+ALMOST\":\n return \"NOUN+A3SG+P3SG+NOM\"\n elif parseString == \"ADJ$VERB+POS+IMP+A2SG\":\n return \"ADJ\"\n elif parseString == \"NOUN+A3SG+PNON+NOM$VERB+POS+IMP+A2SG\":\n return \"NOUN+A3SG+PNON+NOM\"\n elif parseString == \"INF2+A3SG+P3SG+NOM$INF2^DB+ADJ+ALMOST$\":\n return \"INF2+A3SG+P3SG+NOM\"\n else:\n return None",
"def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])",
"def preprocess(self, volpiano: str, strict: bool = None):\n if strict is None: strict = self.strict\n\n if volpiano[0] not in '12':\n raise ClefError('Missing clef: the volpiano does not start with a clef (1 or 2)')\n \n if re.match('^[12]-?[^-]', volpiano):\n raise HyphenationError('Invalid clef hyphenation: chant should start with 1-- or 1---')\n \n # Mixed hyphenation: start with 1-- (2 hyphens), but still contains word \n # boundaries (3 hyphens). \n has_standard_hyphenation = volpiano.startswith('1---') or volpiano.startswith('2---')\n if not has_standard_hyphenation and re.match('.*[^-]---[^-]', volpiano):\n if strict:\n raise HyphenationError('Mixed hyphenation: starts with 1--, but contains word boundaries')\n else:\n # Todo debug\n volpiano = volpiano[0] + '---' + volpiano[3:]\n elif not has_standard_hyphenation:\n if strict:\n raise HyphenationError('Chant contains no word boundaries')\n else:\n volpiano = (volpiano.replace('--', '$$$')\n .replace('-', '--')\n .replace('$$$', '---'))\n\n # 4 or 5 hyphens are used as a separator: that's not supported. \n # If not strict, replace by word boundary\n if re.match('.*[^-]-{4,5}[^-]', volpiano):\n if strict:\n raise HyphenationError('contains boundaries with 4 or 5 hyphens')\n else:\n def replacer(match):\n return re.sub('-+', '---', match.group())\n volpiano = re.sub('[^-]-{4,5}[^-]', replacer, volpiano)\n # Repeat to also replace neighbouring matches\n volpiano = re.sub('[^-]-{4,5}[^-]', replacer, volpiano)\n \n # Missing pitches with more than 6 hyphens\n if re.match('.*6-{7,}6', volpiano):\n if strict:\n raise HyphenationError('Too many hyphens in missing pitches')\n else:\n volpiano = re.sub('6-{7,}6', '6------6', volpiano)\n\n # Missing pitches should be transcribed as ---6------6---: preceded \n # and followed by a word boundary (3 hyphens)\n if re.match('.*[^-]--7*6------6', volpiano) or re.match('.*6------67*--[^-]', volpiano):\n if strict:\n raise HyphenationError('Missing pitches preceded/followed by syllable boundary')\n else:\n def replacer(match):\n vol = match.group()\n if vol[2] != '-': vol = '-' + vol\n if vol[-3] != '-': vol += '-'\n return vol\n volpiano = re.sub('-+7*6------67*-+', replacer, volpiano)\n\n if '.' in volpiano:\n if strict:\n raise UnsupportedCharacterError('The dot (.) is not supported, use hyphens instead.')\n else:\n volpiano = volpiano.replace('.', '')\n\n # Double barlines written as '33' (two single barlines) rather than 4\n if '33' in volpiano:\n if strict:\n raise BarlineError('Use \"4\" for a double barline, not \"33\"')\n else:\n volpiano = volpiano.replace('33', '4')\n\n # Thick barlines are not used.\n if '5' in volpiano:\n if strict:\n raise BarlineError('Use \"4\" for a double barline, not \"5\"')\n else:\n volpiano = volpiano.replace('5', '4')\n\n # Barlines preceded by too few hyphens\n if has_standard_hyphenation and re.match('.*[^-]-{1,2}[34]', volpiano):\n if strict:\n raise HyphenationError('Barlines should be preceded by 3 hyphens')\n else:\n def replacer(match):\n vol = match.group()\n return vol[0] + '---' + vol[-1]\n volpiano = re.sub('[^-]-{1,2}[34]', replacer, volpiano)\n\n # Barlines followed by too few hyphens\n if has_standard_hyphenation and re.match('.*[34]-{1,2}[^-]', volpiano):\n if strict:\n raise HyphenationError('Barlines should be followed by 3 hyphens')\n else:\n def replacer(match):\n vol = match.group()\n return vol[0] + '---' + vol[-1]\n volpiano = re.sub('[34]-{1,2}[^-]', replacer, volpiano)\n\n # TODO the same problem occurs for non-standard hyphenation. 
Perhaps add this?\n return volpiano",
"def func(str):\n\treturn str.split()",
"def test_parse_se_trouve():\n result = parse(SETROUVE)\n assert result == (\"%20musée\"\"%20\"\"d'art\"\"%20\"\"et\"\"%20\"\n \"d'histoire\"\"%20\"\"de\"\"%20\"\"Fribourg\")",
"def parser(opening):\n\n if \":\" not in opening:\n opening_partial = ((opening).split(\"1.\")[0]).strip()[3:]\n else:\n opening_partial = ((opening).split(\":\")[0]).strip()[3:]\n opening_full = ((opening).split(\"1.\")[0]).strip()[3:]\n return opening_full, opening_partial",
"def e_palavra(chars):\r\n #Funcao interna\r\n def e_silaba_final(chars):\r\n \"\"\"\r\n Funcao que avalia se um dado conjunto de caracteres e uma silaba_final valida\r\n \"\"\"\r\n #So fara sentido se for pelo menos 2 caracteres, caso tenha, ira testar a sua validade\r\n # Silaba_final e valida se for monossilabo2,3 ou silaba4,5\r\n\r\n if len(chars) >= 2:\r\n if e_monossilabo2(chars) or e_monossilabo3(chars):\r\n return True\r\n elif e_silaba4(chars) or e_silaba5(chars):\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n if type(chars) != str:# Verifica se e string\r\n raise ValueError(\"e_palavra:argumento invalido\")\r\n\r\n #se for monossilabo devolve True, caso contrario, vai testar a <silaba>*<silaba_final>\r\n if e_monossilabo(chars) == True:\r\n return True\r\n else:\r\n silaba_final_encontrada = False #Variavel q controla se foi encontrada uma silaba_final\r\n i = 5\r\n\r\n # Este while testa se nos ultimos 5 caracteres existe uma silaba_final\r\n # Quando encontrar uma silabafinal valida, remove essa silaba da string, e sai do while\r\n while i > 0:\r\n if e_silaba_final(chars[-i:]):\r\n chars = chars[:-i]\r\n silaba_final_encontrada = True\r\n break\r\n i -= 1\r\n\r\n # Caso tenha encontrado uma silaba final, vai verificar se a string sobrante e uma combinacao de silabas\r\n # Caso tenha encontrado e nao tenha sobrado nada, ou seja, len(chars) == 0, devolve True\r\n # Se nao encontrou uma silaba final, entao a string nao e uma palavra valida\r\n\r\n if silaba_final_encontrada == True:\r\n silabas_encontradas = 0\r\n tam = len(chars)\r\n if tam != 0:\r\n i = 0\r\n while i < tam:\r\n for f in (5, 4, 3, 2, 1): #Testa do fim pro inicio se encontra uma silaba valida\r\n if f <= len(chars):\r\n if e_silaba(chars[-f:]):\r\n silabas_encontradas += 1\r\n chars = chars[:-f]\r\n break\r\n # Se apos a primeira iteracao nao encontrar silaba, entao a palavra nao e valida\r\n if silabas_encontradas == 0:\r\n return False\r\n # Se tiver encontrado silabas, e o tamanho da string for 0, entao e uma palavra valida\r\n elif silabas_encontradas > 0 and len(chars) == 0:\r\n return True\r\n i += 1\r\n #Se apos o while, len(chars) for > 0, entao e porque a string sobrante nao e composta apenas por silabas validas\r\n # Logo, devolve False\r\n if len(chars) > 0:\r\n return False\r\n else:\r\n return True\r\n else:\r\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
print_stars(5) *\n*\n*\n*\n*\n print_stars(4) *\n*\n*\n*\n print_stars(3) *\n*\n*\n print_stars(2) *\n*\n print_stars(1) *\n print_stars(0) '' | def print_stars(N: int) -> str:
# if N:
# return f'*\n{print_stars(N-1)}'
# return ''
return '' if not N else f'*\n{print_stars(N-1)}' | [
"def print_stars():\n for i in range(2):\n for j in range(35):\n print(\"*\", end = '')\n print('')",
"def star():\n print('*', end='')",
"def starry_box(phrase):\n numStars = len(phrase) + 4\n print '*' * numStars\n print '*', phrase, '*'\n print '*' * numStars\n return",
"def draw_star(t):\n for i in range(5):\n t.left(144) \n t.forward(100)",
"def draw_star(turtle, n):\n\n for i in range(n):\n turtle.forward(100)\n turtle.left(180 - 180/n)",
"def sampleSquare():\n size = int(input('Enter the size: '))\n print('Sample Square of size', size)\n\n # display the first row of stars\n for i in range(size):\n star()\n newline()\n\n # display the \"middle\" rows. There are (size - 2) of them\n for i in range(size - 2):\n # for each row: star, spaces (size - 2 of them), star, newline\n star()\n for j in range(size - 2):\n space()\n star()\n newline()\n \n # display the last row of stars\n for i in range(size):\n star()\n newline()",
"def draw_stars2(list):\n for i in list:\n if type(i) is int:\n int_counter = i\n print \"*\" * int_counter\n elif type(i) is str:\n str_counter = len(i)\n \"\"\"Next, convert to lowercase and print first letter as many times as letters in that word\"\"\"\n new_str = i.lower()\n print new_str [0] * str_counter",
"def bs_stars2(num, max_stars=5):\n return mark_safe(render_stars(num, max_stars, star_set_2))",
"def print_asterisks(sequence):\n print('*' * len(sequence))",
"def starbox(width, height):\n print(\"*\" * width) # print top edge of the box\n # print sides of the box\n for _ in range(height - 2):\n print(\"*\" + \" \" * (width - 2) + \"*\")\n print(\"*\" * width) # print bottom edge of the box",
"def list(show=0):\n global stars_\n if len(stars_) == 0:\n print \"No stars have been selected, go use 'stars()'\"\n return\n if show == 0:\n i=0\n for s in stars_:\n i=i+1\n print i,s[0],s[1],s[2],s[3]\n else:\n if show > 0 and show <= len(stars_):\n s = stars_[show-1]\n print show,s[0],s[1],s[2],s[3]\n else:\n print \"Bad star index\"",
"def space():\n print(' ', end='')",
"def test_stars(self):\n self.stars.empty()\n for i in range(random.randint(8, 16)):\n self.make_star(\"random\")",
"def draw_star(size):\n to.forward(size)\n for i in range(4):\n to.right(144)\n to.forward(size)",
"def bs_stars3(num, max_stars=5):\n return mark_safe(render_stars(num, max_stars, star_set_3))",
"def star(turtle, size):\n for i in range(5):\n turtle.forward(size)\n turtle.right(144)",
"def move_and_draw_stars(screen):\n global stars\n for star in stars:\n star[1] += STAR_SPEED\n if star[1] >= screen.get_height():\n star[1] = 0\n star[0] = randrange(0,639)\n \n screen.set_at(star,(255,255,255))",
"def calculate_text_stars(word_counts) -> int:\n if word_counts == []:\n return 3\n words_per_slide = sum(word_counts) / len(word_counts)\n stars = 5 - abs(words_per_slide - 35) / 8\n # print(stars)\n return max(0, min(5, int(stars + 0.5)))",
"def print_asterisks(string):\n print(\"*\" * len(string))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert that the first (leftmost) protocol value is correctly fetched from the x-forwarded header. | def test_get_protocol_with_more_than_one_value():
request = Mock(
headers={"X-Forwarded-Proto": "https,http,http"},
protocol="http",
)
expected = "https"
protocol = get_browser_protocol(request)
assert expected == protocol | [
"def test_host_header_set_ok(self):\n requests = [\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\",\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com \\r\\n\\r\\n\",\n \"GET http://tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n \"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n (\n \"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\n\"\n \"Host: tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta1-tech.com\\r\\n\\r\\n\"\n ),\n ]\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\", requests=requests\n )\n self.check_response(client, status_code=\"200\", warning_msg=\"frang: \")",
"def test_SERVER_PROTOCOL(self):\n return self.assertEnv('http://host/', {'SERVER_PROTOCOL': 'HTTP/1.1'})",
"def http_first(value: HeaderTupleT) -> Tuple[int, HeaderTupleT]:\n\n order = 0 if value[0].startswith(\"HTTP\") else 1\n return order, value",
"def test_headers(self):\n host = 'divmod.com'\n req = FakeRequest()\n req.setHeader('host', host)\n self.assertEqual(req.getHeader('host'), host)",
"def test_host_header_mismatch(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\nHost: example.com\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)",
"def test_host_header_mismatch_empty(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\nHost: \\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)",
"def test_host_header_no_port_in_host(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com:80/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\"\n ],\n )\n self.check_response(client, status_code=\"200\", warning_msg=WARN_DIFFER)",
"def test_host_header_mismath_port(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com:81/ HTTP/1.1\\r\\nHost: tempesta-tech.com:81\\r\\n\\r\\n\"\n ],\n )\n self.check_response(\n client, status_code=\"403\", warning_msg=\"port from host header doesn't match real port\"\n )",
"def _is_protocol_header_frame(self, value):\r\n return isinstance(value, frame.ProtocolHeader)",
"def test_send_pp_header_v1_no_src_addr(self):\n socket = self.get_socket(PROXY_PROTOCOL.V1)\n socket.getsockname.return_value = ('1.1.1.1', 1000)\n socket.getpeername.return_value = ('2.2.2.2', 2000)\n\n socket._send_pp_header()\n\n expected_header = encode_v1('TCP4', '1.1.1.1', '2.2.2.2', 1000, 2000)\n socket.sendall.assert_called_once_with(expected_header)",
"def test_getClientIP_XForwardedFor(self):\n request = self.createRequestWithIPs()\n clientIP = server.getClientIP(request, useForwardedHeader=True)\n self.assertEqual(clientIP, '2.2.2.2')",
"def assert_header(self):\r\n\r\n if self.length > self.owner.settings[SETTINGS_MAX_FRAME_SIZE]:\r\n raise netius.ParserError(\r\n \"Headers are greater than SETTINGS_MAX_FRAME_SIZE\",\r\n stream = self.stream,\r\n error_code = FRAME_SIZE_ERROR\r\n )\r\n if self.last_type in (HEADERS, CONTINUATION) and not\\\r\n self.last_end_headers and not self.last_stream == self.stream:\r\n raise netius.ParserError(\r\n \"Cannot send frame from a different stream in middle of headers\",\r\n error_code = PROTOCOL_ERROR\r\n )",
"def test_host_header_as_ip6(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET / HTTP/1.1\\r\\nHost: [20:11:abb::1]:80\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)",
"def test_host_header_as_ip(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET / HTTP/1.1\\r\\nHost: 127.0.0.1\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)",
"def assert_has_valid_head(self, response, expected):\r\n assert 'head' in response\r\n head = response['head']\r\n assert isinstance(head, str)\r\n assert head == expected",
"def test_host_header_mismath_port_in_host(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com:81/ HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\"\n ],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)",
"def test_send_pp_header_v1_with_src_addr(self):\n socket = self.get_socket(PROXY_PROTOCOL.V1, src_addr=('6.6.6.6', 666))\n socket.getsockname.return_value = ('1.1.1.1', 1000)\n socket.getpeername.return_value = ('2.2.2.2', 2000)\n\n socket._send_pp_header()\n\n expected_header = encode_v1('TCP4', '6.6.6.6', '2.2.2.2', 666, 2000)\n socket.sendall.assert_called_once_with(expected_header)",
"def test_server_should_be_http_1_1(httpbin):\n resp = get_raw_http_response(httpbin.host, httpbin.port, \"/get\")\n assert resp.startswith(b\"HTTP/1.1\")",
"def test_http_peer(self):\n self.assertEqual(\"1.2.3.4:1234\", self.http.peer)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract metadata like original image name and crop position from the given file name. Change this function to use a different file name pattern. | def get_metadata_from_filename(file_name: str) -> namedtuple:
if os.path.isabs(file_name):
file_name = os.path.basename(file_name)
original_image_name = file_name.split('-')[0]
x_pos = int(file_name.split('.')[-2].split('+')[-2:][0])
Metadata = namedtuple('Metadata', ['original_image_name', 'x_pos'])
return Metadata(original_image_name, x_pos) | [
"def parseFilename(fileName):\n # regex to match names like Axis-BaldCA_2018-05-29T16_02_30_129496.jpg\n # and bm-n-mobo-c__2017-06-25z11;53;33.jpg\n regexExpanded = '([A-Za-z0-9-_]+[^_])_+(\\d{4}-\\d\\d-\\d\\d)T(\\d\\d)[_;](\\d\\d)[_;](\\d\\d)'\n # regex to match diff minutes spec for subtracted images\n regexDiff = '(_Diff(\\d+))?'\n # regex to match optional crop information e.g., Axis-Cowles_2019-02-19T16;23;49_Crop_270x521x569x820.jpg\n regexOptionalCrop = '(_Crop_(-?\\d+)x(-?\\d+)x(\\d+)x(\\d+))?'\n matchesExp = re.findall(regexExpanded + regexDiff + regexOptionalCrop, fileName)\n # regex to match names like 1499546263.jpg\n regexUnixTime = '(1\\d{9})'\n matchesUnix = re.findall(regexUnixTime + regexDiff + regexOptionalCrop, fileName)\n cropInfo = None\n if len(matchesExp) == 1:\n match = matchesExp[0]\n parsed = {\n 'cameraID': match[0],\n 'date': match[1],\n 'hours': match[2],\n 'minutes': match[3],\n 'seconds': match[4]\n }\n isoStr = '{date}T{hour}:{min}:{sec}'.format(date=parsed['date'],hour=parsed['hours'],min=parsed['minutes'],sec=parsed['seconds'])\n dt = dateutil.parser.parse(isoStr)\n unixTime = int(dt.timestamp())\n parsed['diffMinutes'] = int(match[6] or 0)\n cropInfo = match[-4:]\n elif len(matchesUnix) == 1:\n match = matchesUnix[0]\n unixTime = int(match[0])\n dt = datetime.datetime.fromtimestamp(unixTime)\n isoStr = datetime.datetime.fromtimestamp(unixTime).isoformat()\n parsed = {\n 'cameraID': 'UNKNOWN_' + fileName,\n 'date': dt.date().isoformat(),\n 'hours': str(dt.hour),\n 'minutes': str(dt.minute),\n 'seconds': str(dt.second)\n }\n parsed['diffMinutes'] = int(match[2] or 0)\n cropInfo = match[-4:]\n else:\n logging.error('Failed to parse name %s', fileName)\n return None\n if cropInfo[0]:\n parsed['minX'] = int(cropInfo[0])\n parsed['minY'] = int(cropInfo[1])\n parsed['maxX'] = int(cropInfo[2])\n parsed['maxY'] = int(cropInfo[3])\n parsed['isoStr'] = isoStr\n parsed['unixTime'] = int(unixTime)\n return parsed",
"def repackFileName(parsedName):\n cropCoords = None\n if 'minX' in parsedName:\n cropCoords=(parsedName['minX'], parsedName['minY'], parsedName['maxX'], parsedName['maxY'])\n return getImgPath('', parsedName['cameraID'], parsedName['unixTime'],\n cropCoords=cropCoords,\n diffMinutes=parsedName['diffMinutes'])",
"def build_name_with_metadata(image_base_name,metadata):\n\n\n cameraName_chunk = image_base_name[:-23]\n metadata_chunk = 'p'+str(metadata['position']['pan'])+'_t'+str(metadata['position']['tilt'])+'_z'+str(metadata['position']['zoom'])\n timeStamp_chunk = image_base_name[-23:-4]+'__'\n fileTag=image_base_name[-4:]\n imgname = cameraName_chunk+timeStamp_chunk+metadata_chunk+fileTag\n return imgname",
"def file_info(file_name, file_pattern):\n match = re.compile(file_pattern).match(file_name)\n if match:\n basepath = match.group('basepath')\n sensor = match.group('sensor')\n ax = match.group('ax')\n freq = match.group('freq')\n date = match.group('date')\n return basepath, sensor, ax, freq, date\n else:\n return None # there is no file extension to file_name",
"def get_metadata ( fname ):\n original_fname = os.path.basename ( fname )\n metadata_fname = original_fname.split(\"_\")[0] + \"_MTL.txt\"\n metadata_fname = os.path.join ( os.path.dirname ( fname ), metadata_fname )\n return metadata_fname",
"def getImageInformation(file_path):\n if os.path.isdir(file_path) == False:\n file_dir = os.path.basename(file_path)\n file_name = os.path.splitext(file_dir)[0]\n file_format = os.path.splitext(file_path)[1]\n return file_name, file_format",
"def parse_file_name(file_name):\n import os\n\n dir, base_file = os.path.split(file_name)\n if os.path.splitext(base_file)[1] == '.fz':\n base_file=os.path.splitext(base_file)[0]\n if os.path.splitext(base_file)[1] != '.fits':\n raise ValueError(\"Invalid file name \"+file)\n root = os.path.splitext(base_file)[0]\n\n ccdnum = int(root.split('_')[-1])\n return dir, root, ccdnum",
"def parse_file_name(file_name):\n\n dir, base_file = os.path.split(file_name)\n if os.path.splitext(base_file)[1] == '.fz':\n base_file=os.path.splitext(base_file)[0]\n if os.path.splitext(base_file)[1] != '.fits':\n raise ValueError(\"Invalid file name \"+file)\n root = os.path.splitext(base_file)[0]\n\n ccdnum = int(root.split('_')[-1])\n return dir, root, ccdnum",
"def get_pokemon_orig_fileinfo(pokemon_name: str) -> Tuple[str, str]:\n filename = f\"{pokemon_name}{original_image_suffix}{saved_file_type}\"\n return f\"{output_dir}orig/{filename}\", filename",
"def get_file_info(file_name):\n file_type = file_name.split(\".\")[0][:-9]\n file_date = file_name.split(\".\")[0][-8:]\n date_y = file_date[:4]\n date_m = file_date[4:6]\n date_d = file_date[6:8]\n\n return file_type, [date_y, date_m, date_d]",
"def extract_name(filename):\n name = os.path.splitext(os.path.basename(filename))[0]\n pattern = \"([0-9a-zA-Z_\\-\\.]+)_[0-9]+_[0-9]+$\"\n g = re.search(pattern, name)\n if g is not None:\n name = g.groups()[0]\n return name",
"def _parse_h36m_imgname(imgname) -> Tuple[str, str, str]:\n subj, rest = osp.basename(imgname).split('_', 1)\n action, rest = rest.split('.', 1)\n camera, rest = rest.split('_', 1)\n return subj, action, camera",
"def _parse_filename(filename, metadata):\n\n file_noext = os.path.splitext(filename)[0]\n fname = file_noext.split(\"_\")\n\n metadata[\"scene_id\"] = fname[1]\n metadata[\n \"beam_mode\"] = sat_properties.radarsat_product_characteristics[\n fname[2]]\n metadata[\"product_type\"] = fname[-1]\n try:\n metadata[\n \"product_description\"] = sat_properties.radarsat_1_data_products[\n fname[-1][:3]]['description']\n except Exception:\n metadata[\"product_description\"] = \"\"\n\n metadata[\"scene_mean_time\"] = datetime.datetime.strptime(\n fname[3] + fname[4], \"%Y%m%d%H%M%S\")\n\n return metadata",
"def identify_filename_metadata(filename, file_format='CMIP6'):\n if file_format == 'CMIP5':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'date_string']\n elif file_format == 'CMIP6':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'grid', 'date_string']\n else:\n raise NotImplementedError('file_format must be CMIP5 or CMIP6')\n\n basename = os.path.basename(filename)\n directory = os.path.dirname(filename)\n metadata = {'basename': basename, 'directory': directory}\n\n # split the filename into sections\n if basename.endswith('-clim.nc'):\n filename_sects = basename.rpartition('-clim.nc')[0].split('_')\n else:\n filename_sects = basename.rpartition('.nc')[0].split('_')\n\n # but if experiment present_day was in the filename, join these sections\n # back together. This should only occur in pre-PRIMAVERA data.\n if filename_sects[3] == 'present' and filename_sects[4] == 'day':\n filename_sects[3] += '_' + filename_sects.pop(4)\n\n # deduce as much as possible from the filename\n try:\n for cmpt_name, cmpt in zip(components, filename_sects):\n if cmpt_name == 'date_string':\n frequency = _get_frequency(metadata['table'])\n start_date, end_date = cmpt.split('-')\n try:\n metadata['start_date'] = _make_partial_date_time(\n start_date, frequency)\n metadata['end_date'] = _make_partial_date_time(\n end_date, frequency)\n except ValueError:\n msg = 'Unknown date format in filename: {}'.format(\n filename)\n raise FileValidationError(msg)\n else:\n metadata[cmpt_name] = cmpt\n except ValueError:\n msg = 'Unknown filename format: {}'.format(filename)\n raise FileValidationError(msg)\n\n # fixed variables won't have a time range and so create blank values\n potential_missing_values = ['start_date', 'end_date']\n for missing_value in potential_missing_values:\n if missing_value not in metadata:\n metadata[missing_value] = None\n\n metadata['filesize'] = os.path.getsize(filename)\n\n for freq in FREQUENCY_VALUES:\n if freq in metadata['table'].lower():\n metadata['frequency'] = freq\n break\n if 'frequency' not in metadata:\n # set a blank frequency if one hasn't been found\n metadata['frequency'] = ''\n\n return metadata",
"def _get_file_info(filename):\n filename = os.path.split(filename)[-1]\n filename = filename[:str.rfind(filename, '.jsonl.gz')]\n _, mode, idx = filename.split('_')\n return mode, idx",
"def _get_resized_name(image, width, height, crop, namespace):\n path, name = os.path.split(image.name)\n name_part = \"%s/%ix%i\" % (namespace, width, height)\n if crop:\n name_part += \"_cropped\"\n\n return os.path.join(path, name_part, name)",
"def identity(file_name):\n _, base_name = os.path.split(file_name)\n base_name, _ = os.path.splitext(base_name)\n all = re.findall(regexp, base_name)\n if len(all) != 1:\n return \"\"\n else:\n return all[0]",
"def get_img_metadata(f):\n return img_info.read_exif(f)",
"def build_dst_fn(self):\n\n # Start with EXIF DateTime\n try:\n if (self.image_type == photo_rename.IMAGE_TYPE_PNG):\n dst_fn = self.metadata['Xmp.xmp.CreateDate']\n else:\n dst_fn = self.metadata['Exif.Image.DateTime']\n except KeyError:\n dst_fn = None\n\n # If this pattern does not strictly match then keep original name.\n # YYYY:MM:DD HH:MM:SS (EXIF) or YYYY-MM-DDTHH:MM:SS (XMP)\n if (dst_fn and not\n re.match(r'^\\d{4}\\W\\d\\d\\W\\d\\d.\\d\\d\\W\\d\\d\\W\\d\\d$', dst_fn)):\n # Setup for next step.\n dst_fn = None\n\n # Don't assume exif tag exists. If it does not, keep original filename.\n # Lowercase extension.\n if dst_fn is None:\n dst_fn = \"{base}.{ext}\".format(\n base=self.src_fn_base, ext=self.src_fn_ext_lower)\n else:\n dst_fn = \"{0}.{1}\".format(\n dst_fn, photo_rename.EXTENSIONS_PREFERRED[self.image_type])\n\n # XXX: One may argue that the next step should be an 'else' clause of\n # the previous 'if' statement. But the intention here is to clean up\n # just a bit even if we're not really renaming the file. Windows\n # doesn't like colons in filenames.\n\n # Rename using Exif.Image.DateTime or Xmp.xmp.CreateDate\n dst_fn = re.sub(r':', r'', dst_fn)\n dst_fn = re.sub(r'-', r'', dst_fn)\n dst_fn = re.sub(r' ', r'_', dst_fn)\n dst_fn = re.sub(r'T', r'_', dst_fn)\n\n return dst_fn"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert the crop represented by file_name into this image. | def insert(self, file_path: str, annot_type: str) -> None:
if self._valid_file_name_regex.match(os.path.basename(file_path)) is None:
raise ValueError(f'Illegal file name: {os.path.basename(file_path)}')
x_pos = get_metadata_from_filename(file_path).x_pos
if x_pos in self._x_positions:
col = self._cols[x_pos]
else:
col = Column()
self._x_positions.append(x_pos)
self._x_positions.sort()
col.insert(Crop(file_path, annot_type))
self._cols[x_pos] = col
self.n_cols = len(self._cols) | [
"def _add_crop(self, crop_name, parameters):\n variety_sets = parameters[\"CropParameters\"][\"Varieties\"]\n self._store[crop_name] = variety_sets",
"def crop_image(self, image):\n\n pass",
"def crop_to_hint(image_file):\n # [START crop_to_hint]\n vects = get_crop_hint(image_file)\n\n im = Image.open(image_file)\n im2 = im.crop([vects[0].x_coordinate, vects[0].y_coordinate,\n vects[2].x_coordinate - 1, vects[2].y_coordinate - 1])\n im2.save('output-crop.jpg', 'JPEG')\n # [END crop_to_hint]",
"def crop_shapefile(shape_filename, xmin, xmax, ymin, ymax):\n output_shape_filename = os.path.join(os_scratch_folder, \"{}_wgs_cropped.shp\".format(CASE_STUDY_NAME))\n runcommand(\"ogr2ogr -clipsrc {} {} {} {} {} {}\".format(xmin, ymin, xmax, ymax, output_shape_filename, shape_filename))\n print(\"Performing additional cropping..\")\n return output_shape_filename",
"def copier_photo(self, name):\n\t\tif self.filename != False:\n\t\t\tshutil.copyfile(self.filename, name + self.filename[-4:])\n\t\telse: #dans le cas ou aucune photo n'a ete choisi\n\t\t\tprint(\"no file selected\")\n\t\tself.filename = False",
"def generateMask(self, nameFile): \n imgPath = os.path.join(GG.utils.PATH_PHOTO_MASK, nameFile)\n imgMask = Image.open(GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"mask.png\")))\n imgTemplate = Image.open(GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"template.png\")))\n imgUpload = Image.open(imgPath)\n size = MASK_SIZE[self.avatarConfiguration[\"headSize\"]]\n imgUploadResized = imgUpload.resize(size, Image.ANTIALIAS)\n imgMask.paste(imgUploadResized, MASK_COORD[self.avatarConfiguration[\"headSize\"]], imgTemplate)\n imgMask.save(MASK_UPLOAD)\n self.avatarConfiguration[\"mask\"] = \"imgUploadMask.png\"\n self.paintMask()",
"def on_pic_select(self, widget_name, file_path, mouse_pos):\n\n self.popup.dismiss()\n\n database_api.uploadProfilePic(Cache.get(\"info\", \"token\"),\n Cache.get(\"info\", \"nick\"),\n file_path[0]\n )\n\n if round_image.update_image():\n Cache.append(\"info\",\n \"pict\",\n True\n )\n\n pic = [self.ico_user_picture,\n self.ids[\"img_user_card\"]\n ]\n for pp in pic:\n pp.source = \"data/img/pic_user_current.png\"\n pp.reload()",
"def crop_image (filename):\n from PIL import Image\n image = Image.open(filename)\n for edge in 'NSWE':\n image = _crop(image, edge)\n image.save(filename)",
"def save(self, *args, **kwargs):\n\t\tsuper(Crop, self).save(*args, **kwargs)\n\n\t\tif self.size:\n\t\t\t# get all the manually cropped sizes with the same aspect ratio as this crop/size\n\t\t\tsizes = Size.objects.all().filter(\n\t\t\t\taspect_ratio=self.size.aspect_ratio, \n\t\t\t\tsize_set=self.size.size_set,\n\t\t\t\tauto_size=0,\n\t\t\t).order_by(\"-width\")\n\t\t\t\n\t\t\tif sizes:\n\t\t\t\t# create the cropped image \n\t\t\t\tcropped_image = utils.create_cropped_image(\n\t\t\t\t\tself.image.path, \n\t\t\t\t\tself.crop_x, \n\t\t\t\t\tself.crop_y, \n\t\t\t\t\tself.crop_w, \n\t\t\t\t\tself.crop_h\n\t\t\t\t)\n\t\t\t\t\n\t\t\t\t# loop through the other sizes of the same aspect ratio, and create those crops\n\t\t\t\tfor size in sizes:\n\t\t\t\t\tself.image.rescale(cropped_image, size=size)",
"def autocrop_img(filename):\n import subprocess\n import os\n\n try:\n cwd, img_name = os.path.split(filename)\n\n bashcmd = 'epstool --copy --bbox %s %s' % (img_name, 'tmp_'+img_name)\n process = subprocess.Popen(bashcmd.split(), stdout=subprocess.PIPE, cwd=cwd)\n\n process.wait()\n bashcmd2 = 'mv %s %s' % ('tmp_'+img_name, img_name)\n process2 = subprocess.Popen(bashcmd2.split(), stdout=subprocess.PIPE, cwd=cwd)\n except:\n raise RuntimeError('Unable to tight layout. Increase pad_inches?')",
"def crop_image(inputimage, folder, newimgname, xtop=0, ytop=64, xbottom=512, ybottom=448):\n\timg = Image.open(folder + os.sep + inputimage)\n\timg = img.crop((xtop, ytop, xbottom, ybottom))\n\timg.save(folder + os.sep + newimgname, 'PNG')",
"def _copy_image(self, name):\n image = self._get_image(name)\n QtGui.QApplication.clipboard().setImage(image)",
"def crop(self, region):\n time.sleep(2)\n LOG.info(\"Crop fd image for '%s'\", region)\n dest = self._destination(region)\n self._ensure_src()\n self._ensure_dir(dest)\n newfile_fmt = \"%H-%M-%S\"\n if region == \"va\":\n resolution = self.satellite['crop']['va']\n newfile_name = \"%s.png\" % self.va_date.strftime(newfile_fmt)\n elif region == \"ca\":\n resolution = self.satellite['crop']['ca']\n newfile_name = \"%s.png\" % self.ca_date.strftime(newfile_fmt)\n elif region == 'usa':\n resolution = self.satellite['crop']['usa']\n newfile_name = \"%s.png\" % self.va_date.strftime(newfile_fmt)\n\n\n newfile = \"%s/%s\" % (dest, newfile_name)\n if not self.file_exists(newfile):\n crop_cmd = [\"/usr/bin/convert\", \"%s\" % self.source,\n \"-crop\", '\"%s\"' % resolution,\n \"+repage\", \"%s\" % newfile]\n self._execute(crop_cmd)\n self.overlay(newfile, region)",
"def add_transect_file(self, file_name: str):\n # Create a transect dict\n #transect = {\n # 'Path': file_path,\n # 'File': file_name,\n # 'Number': index,\n #}\n\n # Add the transect to the file\n self.Files.append(file_name)",
"def _generate_crop(self):\n if self.box_drawn == True:\n if (self.cd_pic_num != -1) & (self.cd_crop_num == 1):\n self.communicator.generate_crop(picture_num=self.cd_pic_num, \\\n xa=self.xa, ya=self.ya, xb=self.xb, yb=self.yb)\n else:\n print \"ERROR: can only generate a new crop from a thumbnail\"\n else:\n print \"ERROR: please select an area to generate a crop from\"",
"def set_cropping(self, crop=True):\n self._crop = crop\n self._final = None # Force rebuild",
"def process(file_name):\n img=Image.open(str(file_name))\n cim_resized = img.resize((40,40), resample=Image.LANCZOS)\n n = cim_resized.convert('L')\n cropped = np.array(n).astype(np.float64)\n im=Image.fromarray(cropped)\n im.show()\n normalized_cropped_image = cropped - np.mean(cropped)\n normalized_cropped_image = normalized_cropped_image.reshape((-1, image_size, image_size, num_channels)).astype(np.float32)\n predicted_arr = predict(normalized_cropped_image)\n label = ''.join(['' if int(x[0]) == 10 else str(x[0]) for x in list(predicted_arr)])\n print 'LABEL: ' + label",
"def croping1(filename, landmarks):\n img_shape = cv2.imread(filename)\n width = img_shape.shape[0]\n height = img_shape.shape[1]\n img = Image.open(filename)\n left = landmarks[27][0] - landmarks[0][0]\n top = landmarks[8][1] - landmarks[27][1]\n right = landmarks[16][0] - landmarks[27][0]\n if (landmarks[0][0] - left) - 50 < 0:\n left = 0\n else:\n left = (landmarks[0][0] - left) - 50\n if (landmarks[27][1] - top) - 50 < 0:\n top = 0\n else:\n top = (landmarks[27][1] - top) - 50\n if (landmarks[16][0] + right) + 50 > width:\n right = width\n else:\n right = (landmarks[16][0] + right) + 50\n if (landmarks[8][1] + top) + 100 > height:\n down = height\n else:\n down = (landmarks[8][1] + top) + 100\n#saving cropped image\n img_croped = img.crop((left, top, right, down))\n img_croped.save(\"resized1.jpg\")",
"def update_calibration_entry(self, file_name):\n \n self.calibration_file_entry.configure(state=tk.NORMAL)\n self.calibration_file_entry.delete(0, 'end')\n self.calibration_file_entry.insert(0, file_name)\n self.calibration_file_entry.configure(state=\"readonly\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove unlabelled columns in [start-col_width, end+col_width]. | def _remove_overlaps(start, end) -> int:
start = self._x_positions[start % self.n_cols]
end = self._x_positions[int(end) % self.n_cols]
n_removed = 0
for x, col in self._cols.items():
if start - self.col_width <= x <= start or end <= x <= end + self.col_width:
if col.label is None:
n_removed += col.mark_as('ignore')
return n_removed | [
"def get_cols_drop():",
"def CleanUp(self):\n blankColumnPattern = re.compile('^-*$')\n blankColumns = []\n for columnIndex in range(self.alignment.get_alignment_length() - 1):\n columnValues = self.alignment[:,columnIndex]\n match = blankColumnPattern.search(columnValues)\n if (match):\n blankColumns.append(str(columnIndex))\n for column in blankColumns[::-1]:\n self.DeleteRange(',' + str(column), True)\n self.Show(self.displayedColumn)\n self.BackupAlignment()",
"def strip_left_cols(df, cols_to_strip):\n columnss = df.columns\n return df[columns[cols_to_strip:]]",
"def clean_columns_rows(x, y, percentage_lign, percentage_col, flags):\n nb_col = len(x[0])\n nb_lign = len(x)\n col_to_del = []\n lign_to_del = []\n x_cop = x\n y_cop = y\n \n for col in range(0,nb_col):\n unique, counts = np.unique(x_cop[:,col], return_counts = True)\n \n if flags[0] == True and counts[np.where(unique == 0.)[0]] > 0.8*nb_lign:\n col_to_del.append(col)\n \n if counts[np.where(unique == -999.)[0]] > percentage_lign*nb_lign:\n col_to_del.append(col)\n \n x_cop = np.delete(x_cop, col_to_del, 1)\n \n for lign in range(0,nb_lign):\n unique, counts = np.unique(x[lign,:], return_counts = True)\n \n if counts[np.where(unique == -999.)[0]] >= percentage_col*nb_lign:\n lign_to_del.append(lign)\n \n x_cop = np.delete(x_cop, lign_to_del, 0)\n y_cop = np.delete(y_cop, lign_to_del, 0)\n \n return x_cop, y_cop, col_to_del",
"def dataframe_clip_strings(df, max_width, include=None, exclude=None):\n for cn in df.columns:\n if include is not None and cn not in include:\n continue\n if exclude is not None and cn in exclude:\n continue\n try:\n df[cn] = [vv[:min(max_width, len(vv))] for vv in df[cn].values if vv is not None]\n except TypeError:\n pass\n\n return df",
"def _pad_columns(self):\n previous_row = self.previous\n\n if previous_row is None:\n # Start of layout; don't need to pad columns\n return\n\n while len(previous_row.end) < len(self.start):\n previous_row.end.append(set())\n\n while len(previous_row.end) > len(self.start):\n self.start.append(set())\n\n while not previous_row.end[-1] and not self.start[-1]:\n del previous_row.end[-1], self.start[-1]",
"def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)",
"def remove_below_lower_length_limit(self) -> None:\n for column_name in self.data:\n threshold_executor = TrimUtils.remove_text_below_lower_length_threshold(\n self.config[f'{column_name}_lower_length_limit']\n )\n self.data = self.data[self.data[column_name].map(threshold_executor)]\n self.data.reset_index(drop=True, inplace=True)",
"def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]",
"def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols",
"def delete_all_gap(self):\n # pdb.set_trace()\n\n rem = set(self.get_all_gap_cols())\n subset = [x for x in range(0, self.get_length()) if x not in rem]\n self.remove_columns(set(rem))\n #_LOG.debug(\"Alignment length reduced to %d\" % len(subset))\n return subset",
"def del_unwanted_cols_fact(data):\r\n # del data['do_plu']\r\n del data['dorder_receiveon']\r\n # del data['dorder_receiveon_time']\r\n return data",
"def delete_padded_rows(data, labels, n_dimensions):\n labels = np.repeat(labels, data.shape[1])\n data = data.reshape(-1, n_dimensions)\n added_rows = np.where(np.all(data == 0, axis=1))\n data = data[~added_rows[0]]\n labels = labels[~added_rows[0]]\n\n return data, labels",
"def preprocessData(df, removeCols):\n\tdf1=df.drop(removeCols, axis=1)\n\t\t\n\treturn df1",
"def remove_empty_columns(aln, enforce_codon=False):\n\n ind = []\n seqs = aln.values()\n alnlen = aln.alignlen()\n\n if not enforce_codon:\n for i in range(alnlen):\n for seq in seqs:\n if seq[i] != \"-\":\n ind.append(i)\n break\n else:\n if alnlen % 3 != 0:\n raise Exception(\n \"cannot set enforce_codon if alignment length \"\n \"is not a multiple of three\")\n\n for i in range(0, alnlen, 3):\n for seq in seqs:\n if seq[i:i+3] != \"---\":\n ind.extend([i, i+1, i+2])\n break\n\n return subalign(aln, ind)",
"def remove_colspan(self, ):\n if self.AttributeNames.COLSPAN in self.attrs:\n del self.attrs[self.AttributeNames.COLSPAN]\n return self",
"def _set_columns(self, start, end):\n if start <= end <= self.width:\n self._write(ST7789_CASET, _encode_pos(\n start+self.xstart, end + self.xstart))",
"def remove_gapped_columns(aln):\n cols = zip(* aln.values())\n ind = util.find(lambda col: \"-\" not in col, cols)\n return subalign(aln, ind)",
"def fitCols(self, col_start, col_end, sheet):\r\n col_n = col_start\r\n while col_n <= col_end:\r\n self.fitCol(col_n, sheet)\r\n col_n = col_n + 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return index of first unlabelled column after x. | def _next_unlabelled_col(x):
for i in range(self.n_cols):
idx = (x + i) % self.n_cols
x_current = self._x_positions[idx]
if self._cols[x_current].label is None:
return idx | [
"def XToCol(self, x):\r\n \r\n colLeft = 0\r\n numColumns = self.GetColumnCount()\r\n for col in xrange(numColumns):\r\n \r\n if not self.IsColumnShown(col):\r\n continue \r\n\r\n column = self.GetColumn(col)\r\n\r\n if x < (colLeft + column.GetWidth()):\r\n return col\r\n \r\n colLeft += column.GetWidth()\r\n \r\n return wx.NOT_FOUND",
"def _findIndex(self, x):\n if x< self[0][0] or x> self[-1][0]:\n return None\n\n idx = bisect.bisect_left(self.xproxy, x)\n if self[idx][0] == x:\n return idx\n else:\n return idx-1",
"def index_at(self, x, y):\n cell_size = self.cell_size\n rect = self.rect\n idx = ((x-rect[0])//cell_size) + ((y-rect[1])//cell_size) * self.cols\n return int(idx) if -1<idx<self.num_buckets else None",
"def get_full_column(X: np.ndarray):\n if len(X.shape) == 1:\n X = X.reshape((1, *X.shape))\n inds = np.arange(X.shape[1])\n wherenonnan = np.isfinite(X).all(axis=0)\n ind = inds[wherenonnan][0]\n return ind",
"def min_idx(self, col):\n return self._min(col)[1]",
"def _get_column(self, index):\n left, right = self._get_columns()\n return left if index < left.count else right",
"def columnAt(self, p_int): # real signature unknown; restored from __doc__\n return 0",
"def _col_index(column):\n if column:\n return column.index\n else:\n return '-'",
"def get_colnumber(self, header):\n for i in range(0, len(self.data)):\n if self.data[i][0] == header:\n return i\n return None",
"def xy_to_index(x, y):\n index = y * columns + x\n return index",
"def getColIdx(self, col):\n try: \n return int(col)\n except:\n return ord(col)-ord('a')",
"def intrv(self, x):\n gb = self.breakpoints[self.mask]\n n = gb.size - self.nord\n indx = np.zeros(x.size, dtype=int)\n ileft = self.nord - 1\n for i in range(x.size):\n while x[i] > gb[ileft+1] and ileft < n - 1:\n ileft += 1\n indx[i] = ileft\n return indx",
"def shortest_column(self):\n return self.column_bottoms.index(min(self.column_bottoms))",
"def get_col(i_col):\n return i_col[1]",
"def getColIdx(self, col):\n try:\n return int(col)\n except:\n return ord(col)-ord('a')",
"def __column_index_by_var(self, var):\n if var not in self.__column_variables_names:\n return -1\n return self.__column_variables_names.index(var)",
"def column(self, index):\n if index < 0 or index >= len(self.__table[0]):\n return -1\n return [row[index] for row in self.__table]",
"def _get_column_index(self, column_name):\n return self._header.index(column_name)",
"def find_tok_column(self, lexpos):\n found = self.lexer.lexdata.rfind('\\n', 0, lexpos)\n column = lexpos - max(0, found)\n return column"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Move the file associated with this crop to the directory path/annot_type, where annot_type is this crop's annotation type. | def move_to(self, path: str) -> None:
self._new_path = os.path.join(path, self.annot_type, os.path.basename(self._file_path))
os.rename(self._file_path, self._new_path)
self._file_was_moved = True | [
"def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath.split(\"/\")[-1]\n\n # Execute instruction\n os.replace(srcPath, newPath)",
"def MoveTo(self):\n print('move image file to...')",
"def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")",
"def _move(self, file):\n ext = os.path.splitext(file)[1].replace('.', '').lower()\n ftype = False\n for e in self.cfg['file_types'].keys():\n if ext in self.cfg['file_types'][e]:\n ftype = e\n break\n dest = os.path.join(self.today_dir, ftype)\n try:\n shutil.move(file, dest)\n except:\n fname = os.path.splitext(os.path.basename(file))\n rnd = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))\n dest = os.path.join(self.today_dir, ftype, fname[0]+rnd+fname[1])\n shutil.move(file, dest)",
"def move(self, **kwargs):\r\n if os.path.exists(self.old_artifact_path):\r\n if os.path.exists(self.target):\r\n shutil.rmtree(self.target)\r\n log.info(\"Copying %s on the local filesystem\" % self.type)\r\n shutil.copytree(self.old_artifact_path, self.target)\r\n else:\r\n log.warning(\"Not moving docs, because the build dir is unknown.\")",
"def move_file(self, path: PathLike, dest: PathLike, force: bool = False):",
"def move_file(path):\n dst_path = request.args.get('to')\n return files.move_file(path, dst_path)",
"def insert(self, file_path: str, annot_type: str) -> None:\n if self._valid_file_name_regex.match(os.path.basename(file_path)) is None:\n raise ValueError(f'Illegal file name: {os.path.basename(file_path)}')\n x_pos = get_metadata_from_filename(file_path).x_pos\n if x_pos in self._x_positions:\n col = self._cols[x_pos]\n else:\n col = Column()\n self._x_positions.append(x_pos)\n self._x_positions.sort()\n col.insert(Crop(file_path, annot_type))\n self._cols[x_pos] = col\n\n self.n_cols = len(self._cols)",
"def _move_to_inserted_directory(file_path: str):\n parts = list(Path(file_path).parts)\n parts.insert(-1, 'inserted')\n move(file_path, str(Path(*parts)))",
"def _move_image(self, label, ind):\r\n root, file_name = os.path.split(self.df.sorted_in_folder[ind])\r\n # two lines below check if the filepath contains as an ending a folder with the name of one of the labels\r\n # if so, this folder is being cut out of the path\r\n if os.path.split(root)[1] in labels:\r\n root = os.path.split(root)[0]\r\n# output_path = os.path.join(root, label, file_name)\r\n output_path = self.label_dir + '/' + label + '/' + file_name\r\n print(\"file_name =\",file_name)\r\n print(\" %s --> %s\" % (file_name, label))\r\n move(self.df.sorted_in_folder[ind], output_path)\r\n \r\n # keep track that the image location has been changed by putting the new location-path in sorted_in_folder \r\n self.df.loc[ind,'sorted_in_folder'] = output_path\r\n \r\n #####\r",
"def mv(self, from_path, to_path):\n self.api_client.file_move(from_path, to_path)",
"def move_file(path):\n new_path = os.path.join(TEST_DIR, TEST_FILE)\n command = ['mv', TEST_FILE, new_path]\n file_operation(path, command)",
"def move(self):\n models_dir = os.path.join(self.ilamb_root, self.dest_dir)\n for f in self.ingest_files:\n if f.is_verified:\n target = target_dir = os.path.join(models_dir, f.data)\n if not os.path.isdir(target_dir):\n makedirs(target_dir, mode=0775)\n if self.overwrite_files:\n target = os.path.join(target_dir, f.name)\n msg = file_moved.format(f.name, target)\n try:\n shutil.move(f.name, target)\n except IOError:\n msg = file_protected.format(target)\n if os.path.exists(f.name):\n os.remove(f.name)\n except shutil.Error:\n msg = file_exists.format(f.name, target)\n if os.path.exists(f.name):\n os.remove(f.name)\n else:\n if len(self.link_dir) > 0:\n self.symlink(target_dir, f)\n finally:\n self.log.add(msg)",
"def _move(self):\n newpath = self.action['newpath']\n try:\n self.fs.move(self.fp,newpath)\n except OSError:\n raise tornado.web.HTTPError(400)\n return newpath",
"def move(self, file, rank, promotionType=\"\"):\n return self._run_on_server('move', file=file, rank=rank, promotionType=promotionType)",
"def move(self, media):\n new_path = os.path.join(\n self.path(),\n media.timestamp().strftime(self.media_path_format))\n media.move_to(new_path)",
"def move_file(self, url, path, file, targ, dst):\n targ = self.adjust_path(targ)\n dst = self.adjust_path(dst)\n # print('\\n m365 spo file move -o json -u {0} -s {1} -t {2}'.format(url, path + '/' + file, '/' + targ + '/' + dst + '/'))\n # return emulated_result_1 \n result = (subprocess.run(['m365', 'spo file move -o json -u', url, '-s', path + '/' + file, '-t', '/' + targ + '/' + dst + '/'], \\\n stdout=subprocess.PIPE))\n\n if (result.returncode == 0):\n print('/' + path + '/' + file + bcolors.BOLD + ' ==> ' + bcolors.ENDC, end='')\n print('/' + dst + '/' + bcolors.OKCYAN + \" FILE MOVED\" + bcolors.ENDC)\n else:\n print('/' + path + '/' + file + bcolors.BOLD + ' ==> ' + bcolors.ENDC, end='')\n print(dst + '/' + bcolors.FAIL + \" FILE MOVE FAILED: \" + bcolors.ENDC + \\\n json.loads(result.stdout.decode('utf-8'))['message'])\n \n return",
"def mv(self, src_path, dst_path):\n try:\n postdata = codecs.encode(json.dumps({ 'src': src_path, 'dst': dst_path }), 'utf-8')\n self._urlopen('/api/fileops/move', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to move '{}' to '{}'\".format(src_path, dst_path))",
"def on_moved(self, event):\n print(\"Moved\")\n time.sleep(5)\n self.moveFile(event.dest_path)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Undo a former file movement by moving the file back to its origin. | def move_back(self) -> None:
if self._file_was_moved:
os.rename(self._new_path, self._file_path)
pass | [
"def undo(backup):\r\n backup.load_backup()\r\n backup.undo_moves()",
"def undo():\n\n try:\n my_file.undo()\n except FileNotFoundError:\n print('No file has been read yet')\n except Exception:\n print('You must make an edit to undo')",
"def undo():",
"def undo_moves(self):\r\n logging.info(\"Undoing all moves held in records\")\r\n for move in self.record.keys():\r\n logging.debug('Moving {} to {}'.format(move, self.record[move]))\r\n try:\r\n os.rename(move, self.record[move])\r\n os.removedirs(os.path.dirname(move))\r\n except OSError as e:\r\n logging.error('There was an error moving the file {}'.format(move))\r\n logging.error('Error status: {}'.format(e))\r\n logging.info(\"Completed undoing moves\")\r\n try:\r\n os.remove(self.backup)\r\n except OSError as e:\r\n logging.error('There was an error removing the file {}'.format(self.backup))\r\n logging.error('Error status: {}'.format(e))",
"def restore_files(self):\n for (original, modified) in self.modified_files:\n if os.path.isfile(original):\n os.remove(modified)\n shutil.move(original, modified)",
"def undo_pop(self):\n # print(\"tokundo\", file=sys.stderr)\n self.pos -= 1",
"def undo(self):\n if self.history:\n xy0, xy1, data_size = self.history.pop()\n x0, y0 = xy0\n x1, y1 = xy1\n self._used[y1][x1] -= data_size\n self._used[y0][x0] = data_size\n if self.goal == xy1:\n self.goal = xy0",
"def undo(self):\n self._check_undo_prerequisites()\n self._decrement_history_pointer()\n self._replay_history()",
"def undo(self):\n if self._history_position > 0:\n self._history_position -= 1\n self._commands[\n self._history[self._history_position][1]\n ].execute(self._history[self._history_position][2])\n else:\n print(\"nothing to undo\")",
"def _restore_file(file):\n\n os.remove(file)\n os.rename(file + '.bak', file)",
"def undo(self):\n assert 0 <= self.redoIndex <= len(self.redoChain)\n # If tempChange is active, undo it first to fix cursor position.\n if self.tempChange:\n self.__undoMove(self.tempChange)\n self.tempChange = None\n while self.redoIndex > 0:\n self.redoIndex -= 1\n changes = self.redoChain[self.redoIndex]\n if self.debugRedo:\n app.log.info('undo', self.redoIndex, repr(changes))\n if ((changes[0][0] == 'f' or changes[0][0] == 'm') and\n len(changes) == 1):\n # Undo if the last edit was a cursor move.\n self.__undoChange(changes[0])\n else:\n self.shouldReparse = True\n # Undo previous non-trivial edit\n for change in reversed(changes):\n self.__undoChange(change)\n break\n self.processTempChange = False",
"def undo(self):\n self.cnvImgTest.undoLast()",
"def restore_last_undo_point(self):\n self.unload()",
"def undo(self):\n for move in reversed(self.undos):\n move.do()",
"def reset(self):\n self.source.seek(0)\n self.target.seek(0)",
"def undo(self, event):\n self.parent.undo()",
"def _undo(self):\r\n raise NotImplementedError",
"def undo_last_move(self):\n if self.last_move is None:\n return\n x, y, i, j = self.last_move\n self.boards[x][y].undo_last_move()\n if len(self.history) > 1:\n self.last_move = self.history[-2]\n else:\n self.last_move = None\n self.__on_turn = Square.X if self.__on_turn == Square.O else Square.O\n del self.history[-1]",
"def undo_move(self, n=1):\n self.state = self.move_history[-n - 1]\n self.positions = self.copy_board(self.state[1])\n # delete all moves between the current state and the restored state\n del self.move_history[-n:]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Mark this column with the provided label. Returns number of labelled crops. | def mark_as(self, label: str) -> int:
self.label = label
return len(self._content) // len(ANNOTATIONS) | [
"def mark_label(self, label):\n\t\tcore.BNLowLevelILMarkLabel(self.handle, label.handle)",
"def get_count_by_label(self, label=None):\n if label is None:\n return len(self.data)\n else:\n return sum(1 for d in self.data if d.pred == label)",
"def inc_label(self):\n self.label_count += 1\n return self.label_count",
"def encode_label(self, label: str) -> int:\n return self.class_map[label]",
"def relabel_car_hood(label, new_val=0, height_limit=495):\n \n if not isinstance(label, np.ndarray):\n label = np.array(label)\n if len(label.shape) != 2:\n raise ValueError('input is not a 2D array')\n if label.shape[0] < height_limit-1:\n raise ValueError(\"height_limit must be less than label's height\")\n \n np.place(label[height_limit:,:], label[height_limit:,:]==10, new_val)",
"def label_count(self) -> int:\n return len(self.itol)",
"def notebook_cell_count(label='cell_count'):\n code_cells = orm.CodeCell.__table__\n columns = [\n code_cells.c.notebook_id,\n sa.func.count(code_cells.c.cell_number.distinct()).label(label)\n ]\n s = sa.select(columns).group_by(code_cells.c.notebook_id)\n return pd.read_sql(s, db.engine)",
"def GetLabelWidth(label):\n assert type(label) is str, \"Incorrect type\"\n\n length = 0\n for c in label:\n length += glutBitmapWidth(GLUT_BITMAP_HELVETICA_18, ord(c))\n\n assert type(length) is int\n assert length >= 0\n\n return length",
"def label(self):\n return self._label_shape",
"def index_of(self, label: str) -> int:\n return self.label_dict[label]",
"def __probLabel(self, label):\n if label not in self.db.labelToDocsCount.keys():\n print \"Error: label %s not in DB\" % label\n exit()\n\n # Use Laplacian smoothing\n return float(self.db.labelToDocsCount[label] + 1) / (self.db.allDocsCount + 2)",
"def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUC2_GetCount(self, label)",
"def get_label_num(self, *args):\n return _ida_hexrays.ctree_item_t_get_label_num(self, *args)",
"def assign_label(x, y, label, segments):\n value = segments[y, x]\n labelled_image[segments == value] = label",
"def count_specific_label(label):\n\n label_count = []\n for iter in range(train.shape[0]):\n if str(label) in train.Target[iter].split():\n for sample_label in train.Target[iter].split():\n label_count.append(sample_label)\n return np.unique(np.array(label_count), return_counts=True)",
"def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUC2_GetCount(self, label)",
"def label(self, location, *args, **kwargs):\n\n if isinstance(location, fslimage.Image):\n return self.maskLabel(location, *args, **kwargs)\n else:\n return self.coordLabel(location, *args, **kwargs)",
"def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUC3_GetCount(self, label)",
"def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2ISS2_GetCount(self, label)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Move all files of this column to the corresponding directory, if this column is not labeled to be ignored. Returns number of files moved. | def move(self, dry_run: bool) -> int:
if self.label == 'ignore':
return 0
file_counter = 0
for crop in self._content:
if not dry_run:
crop.move_to(self.label)
file_counter += 1
return file_counter | [
"def _move_files(self):\n self._move_directory(self._origin, self._destination)\n for directory in self._filesystem.listdir(self._filesystem.join(self._layout_tests_root, PLATFORM_DIRECTORY)):\n self._move_directory(self._filesystem.join(PLATFORM_DIRECTORY, directory, self._origin),\n self._filesystem.join(PLATFORM_DIRECTORY, directory, self._destination))",
"def move(self):\n models_dir = os.path.join(self.ilamb_root, self.dest_dir)\n for f in self.ingest_files:\n if f.is_verified:\n target = target_dir = os.path.join(models_dir, f.data)\n if not os.path.isdir(target_dir):\n makedirs(target_dir, mode=0775)\n if self.overwrite_files:\n target = os.path.join(target_dir, f.name)\n msg = file_moved.format(f.name, target)\n try:\n shutil.move(f.name, target)\n except IOError:\n msg = file_protected.format(target)\n if os.path.exists(f.name):\n os.remove(f.name)\n except shutil.Error:\n msg = file_exists.format(f.name, target)\n if os.path.exists(f.name):\n os.remove(f.name)\n else:\n if len(self.link_dir) > 0:\n self.symlink(target_dir, f)\n finally:\n self.log.add(msg)",
"def _move_image(self, label, ind):\r\n root, file_name = os.path.split(self.df.sorted_in_folder[ind])\r\n # two lines below check if the filepath contains as an ending a folder with the name of one of the labels\r\n # if so, this folder is being cut out of the path\r\n if os.path.split(root)[1] in labels:\r\n root = os.path.split(root)[0]\r\n# output_path = os.path.join(root, label, file_name)\r\n output_path = self.label_dir + '/' + label + '/' + file_name\r\n print(\"file_name =\",file_name)\r\n print(\" %s --> %s\" % (file_name, label))\r\n move(self.df.sorted_in_folder[ind], output_path)\r\n \r\n # keep track that the image location has been changed by putting the new location-path in sorted_in_folder \r\n self.df.loc[ind,'sorted_in_folder'] = output_path\r\n \r\n #####\r",
"def moveFiles(inputDir, inputFiles):\n\tfor file in inputFiles:\n\t\tlogger.debug('moveFiles: {0}'.format(file))\n\t\tshutil.move(join(inputDir, file), join(inputDir, 'processed', file))\n\n\treturn 0",
"def test_verifyMoveFiles(self):\n\n target = 'testes2/'\n self.createDirectorySource()\n self.createDirectoryTarget()\n self.createFiles()\n\n for file in self.files:\n m = re.search(self.regex, file)\n if m != None:\n command = \"%s%s\" %('testes/', file)\n shutil.move(command, target)\n\n after = os.listdir('testes/')\n moved = os.listdir('testes2/')\n\n self.assertEqual(len(after), len(self.files)-2)\n self.assertEqual(len(moved), 2)\n\n self.cleanSource()\n self.cleanTarget()",
"def move_file():\n # print(\"\\n\".join(os.listdir(filepath)))\n # folders = [os.path.join(filepath, fld) for fld in os.listdir(filepath)]\n # print(filepath + \":\\n \" + \"\\n \".join(folders))\n folders = filter(os.path.isdir, os.listdir(u\".\"))\n # print(\"Sub-folders: \", u\"\\n\".join(folders))\n for folder in folders:\n files = [os.path.join(folder, fn) for fn in os.listdir(folder)]\n files = filter(os.path.isfile, files)\n for fn in files:\n _, filename = os.path.split(fn)\n shutil.move(fn, filename)\n assert 0 == len(os.listdir(folder))",
"def moveprocessedfb2(self, input_folder_path, processed_folder_path, conn, logg):\n logg.writing_log(conn, 'Starting moving processed fb2 files')\n if os.listdir(input_folder_path):\n for file_name in os.listdir(input_folder_path):\n os.rename(os.path.join(input_folder_path, file_name), os.path.join(processed_folder_path, file_name))\n logg.writing_log(conn, 'All processed files are moved to processed folder')\n else:\n logg.writing_log(conn, 'The folder is empty, nothing to move')\n conn.commit()\n conn.close()",
"def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))",
"def _do_move(self, artist, album, song):\n try:\n move_to = \"{0}{1}/{2}/\".format(self.dupe_dir, \n artist, album)\n if not os.path.exists(move_to):\n os.makedirs(move_to)\n \n shutil.move(song['path'], move_to)\n self.moved.append(song)\n return 1\n except:\n self.logger.error(\"Could not move file: {0}\".format(str(song['path'])))\n return 0",
"def move_tensorboard_files(event_dir: pathlib.Path) -> None:\n tensorboard_dir = tensorboard.get_base_path({})\n tensorboard_files = tensorboard.util.find_tb_files(event_dir)\n for file in tensorboard_files:\n file.rename(tensorboard_dir.joinpath(file.name))",
"def __resortFiles(self):\n sortColumn = self.filesTree.sortColumn()\n self.filesTree.sortItems(\n 1, self.filesTree.header().sortIndicatorOrder())\n self.filesTree.sortItems(\n sortColumn, self.filesTree.header().sortIndicatorOrder())",
"def move_container_files(self, containers_to_move):\n for container in containers_to_move:\n print(\"Moving {} to {}....\".format(container.file.path, self.container_directory))\n\n original_path = container.file.path\n new_name = os.path.basename(container.file.name)\n with open(original_path, \"rb\") as f:\n container.file.save(new_name, File(f), save=True)\n\n os.remove(original_path)",
"def _move_files(topdatadir, startdate, model_forcing):\n\n curdate = startdate\n subdir = f\"{topdatadir}/cf_{model_forcing}\"\n subdir += f\"_{curdate.year:04d}{curdate.month:02d}\"\n files = glob.glob(f\"{subdir}/*.NC\")\n for filename in files:\n shutil.move(filename, os.path.join(topdatadir, os.path.basename(filename)))\n shutil.rmtree(subdir)",
"def bulk_move_to_dir(target_files, dest_dir_path):\n if target_files is not None:\n for target_file in target_files:\n move_to_dir(target_file, dest_dir_path)",
"def move(self, srcFS, srcPath, dstFS, dstPath):\n raise NotImplementedError, \"TODO(travis)\"",
"def testMoveFiles(loggingMixin, mocker, nMaxFiles):\n # Setup\n hltMode = \"C\"\n # Mocks\n # Mock for moving files\n mMove = mocker.MagicMock()\n mocker.patch(\"overwatch.base.replay.shutil.move\", mMove)\n # Mock for retrieving the HLT mode\n setupRetrieveHLTModeMock(hltMode = hltMode, mocker = mocker)\n\n fileLocation = os.path.dirname(os.path.realpath(__file__))\n baseDir = os.path.join(fileLocation, \"replayData\")\n destinationDir = os.path.join(fileLocation, \"destinationDir\")\n nMoved = replay.moveFiles(baseDir = baseDir,\n destinationDir = destinationDir,\n nMaxFiles = nMaxFiles)\n\n # Determine expected values\n availableFiles = list(replay.availableFiles(baseDir = baseDir))\n availableFiles = [(source, os.path.join(destinationDir, name)) for source, name in availableFiles if \"combined\" not in source]\n\n # If there aren't enough files, don't check that we've transferred as many as requested because\n # it won't be possible.\n if not len(availableFiles) < nMaxFiles:\n assert nMoved == nMaxFiles\n\n # For each call, we expand each tuple of args.\n assert mMove.mock_calls == [mocker.call(*args) for args in availableFiles[:nMaxFiles]]",
"def move_backups(self, name, source, destination, regex):\n files = os.listdir(source)\n pattern = re.compile(regex)\n for entry in files:\n match = pattern.match(entry)\n if match is None:\n continue\n if name == match.group(1):\n self.logger.debug('Archiving %s', entry)\n path = os.path.join(source, entry)\n result = self.os_rename(path, os.path.join(destination, entry))\n if result != 0:\n return result\n return 0",
"def move_files(from_dir, to_dir, keyword):\n \n if not os.path.exists(to_dir):\n os.mkdir(to_dir)\n \n if keyword == None:\n # If keyword is left empty, from_dir is considered a list of files.\n to_move = from_dir\n else:\n to_move = glob.glob(os.path.join(from_dir, '*' + keyword + '*'))\n \n n_moved = 0 \n for f in to_move:\n if os.path.isfile(f):\n shutil.move(f, to_dir)\n n_moved += 1\n \n print \"Moved %i files to %s.\" % (n_moved, to_dir)",
"def move_word_files(self):\n destination_file = os.getcwd() + \"\\word_files\"\n for file in os.listdir():\n if file.endswith(\".docx\"):\n try:\n shutil.move(file, destination_file)\n except shutil.Error:\n pass\n for file in os.listdir():\n if file.endswith('.docx'):\n os.unlink(file)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create metrics of gauge type for filesystem replica link lag, with the local filesystem name, replication direction, remote array name, remote filesystem name and replication status as labels. | def _replica_links_lag(self):
for f in self.fb.get_filesystem_replica_links():
self.replica_links_lag.add_metric([f.local_file_system.name,
f.direction,
f.remote.name,
f.remote_file_system.name,
f.status], -1 if f.lag is None else f.lag) | [
"def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))",
"def update_load_metrics(self):\n\n response = self.gcs_client.get_all_resource_usage(timeout=60)\n resources_batch_data = response.resource_usage_data\n log_resource_batch_data_if_desired(resources_batch_data)\n\n # Tell the readonly node provider what nodes to report.\n if self.readonly_config:\n new_nodes = []\n for msg in list(resources_batch_data.batch):\n node_id = msg.node_id.hex()\n new_nodes.append((node_id, msg.node_manager_address))\n self.autoscaler.provider._set_nodes(new_nodes)\n\n mirror_node_types = {}\n cluster_full = False\n if (\n hasattr(response, \"cluster_full_of_actors_detected_by_gcs\")\n and response.cluster_full_of_actors_detected_by_gcs\n ):\n # GCS has detected the cluster full of actors.\n cluster_full = True\n for resource_message in resources_batch_data.batch:\n node_id = resource_message.node_id\n # Generate node type config based on GCS reported node list.\n if self.readonly_config:\n # Keep prefix in sync with ReadonlyNodeProvider.\n node_type = format_readonly_node_type(node_id.hex())\n resources = {}\n for k, v in resource_message.resources_total.items():\n resources[k] = v\n mirror_node_types[node_type] = {\n \"resources\": resources,\n \"node_config\": {},\n \"max_workers\": 1,\n }\n if (\n hasattr(resource_message, \"cluster_full_of_actors_detected\")\n and resource_message.cluster_full_of_actors_detected\n ):\n # A worker node has detected the cluster full of actors.\n cluster_full = True\n resource_load = dict(resource_message.resource_load)\n total_resources = dict(resource_message.resources_total)\n available_resources = dict(resource_message.resources_available)\n\n waiting_bundles, infeasible_bundles = parse_resource_demands(\n resources_batch_data.resource_load_by_shape\n )\n\n pending_placement_groups = list(\n resources_batch_data.placement_group_load.placement_group_data\n )\n\n use_node_id_as_ip = self.autoscaler is not None and self.autoscaler.config[\n \"provider\"\n ].get(\"use_node_id_as_ip\", False)\n\n # \"use_node_id_as_ip\" is a hack meant to address situations in\n # which there's more than one Ray node residing at a given ip.\n # TODO (Dmitri): Stop using ips as node identifiers.\n # https://github.com/ray-project/ray/issues/19086\n if use_node_id_as_ip:\n peloton_id = total_resources.get(\"NODE_ID_AS_RESOURCE\")\n # Legacy support https://github.com/ray-project/ray/pull/17312\n if peloton_id is not None:\n ip = str(int(peloton_id))\n else:\n ip = node_id.hex()\n else:\n ip = resource_message.node_manager_address\n self.load_metrics.update(\n ip,\n node_id,\n total_resources,\n available_resources,\n resource_load,\n waiting_bundles,\n infeasible_bundles,\n pending_placement_groups,\n cluster_full,\n )\n if self.readonly_config:\n self.readonly_config[\"available_node_types\"].update(mirror_node_types)",
"def _create_metrics(self):\n self.registry = prometheus_client.CollectorRegistry()\n self.quota_free_count = prometheus_client.Gauge(\n 'kuryr_quota_free_count', 'Amount of quota available'\n ' for the network resource', labelnames={'resource'},\n registry=self.registry)\n\n self.port_quota_per_subnet = prometheus_client.Gauge(\n 'kuryr_port_quota_per_subnet', 'Amount of ports available'\n ' on Subnet', labelnames={'subnet_id', 'subnet_name'},\n registry=self.registry)\n\n self.lbs_members_count = prometheus_client.Gauge(\n 'kuryr_critical_lb_members_count', 'Amount of members per '\n 'critical Load Balancer pool',\n labelnames={'lb_name', 'lb_pool_name'},\n registry=self.registry)\n\n self.lbs_state = prometheus_client.Enum(\n 'kuryr_critical_lb_state', 'Critical Load Balancer State',\n labelnames={'lb_name'},\n states=['ERROR', 'ACTIVE', 'DELETED', 'PENDING_CREATE',\n 'PENDING_UPDATE', 'PENDING_DELETE'],\n registry=self.registry)\n\n buckets = (10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, _INF)\n self.pod_creation_latency = prometheus_client.Histogram(\n 'kuryr_pod_creation_latency', 'Time taken for a pod to have'\n ' Kuryr annotations set', buckets=buckets, registry=self.registry)\n\n self.load_balancer_readiness = prometheus_client.Counter(\n 'kuryr_load_balancer_readiness', 'This counter is increased when '\n 'Kuryr notices that an Octavia load balancer is stuck in an '\n 'unexpected state', registry=self.registry)\n\n self.port_readiness = prometheus_client.Counter(\n 'kuryr_port_readiness', 'This counter is increased when Kuryr '\n 'times out waiting for Neutron to move port to ACTIVE',\n registry=self.registry)",
"def nginx_status_metrics(self):\n\n try:\n nginx_status_conn = urllib2.urlopen(self.url)\n nginx_status_data = nginx_status_conn.read()\n self.nginx_status_available = True\n except urllib2.URLError:\n print 'status err URLError: check the URL and that Nginx running.'\n sys.exit(1)\n except Exception:\n print 'status err failed to obtain nginx status metrics.'\n sys.exit(1)\n\n if self.nginx_status_available:\n # Use regexes to parse /nginx_stats.\n match1 = re.search(r'Active connections:\\s+(\\d+)', nginx_status_data)\n match2 = re.search(r'\\s*(\\d+)\\s+(\\d+)\\s+(\\d+)', nginx_status_data)\n match3 = re.search(r'Reading:\\s*(\\d+)\\s*Writing:\\s*(\\d+)\\s*'\n 'Waiting:\\s*(\\d+)', nginx_status_data)\n print 'metric active_connections int64', int(match1.group(1))\n print 'metric accepted_connections int64', int(match2.group(1))\n print 'metric handled_connections int64', int(match2.group(2))\n print 'metric number_of_requests int64', int(match2.group(3))\n print 'metric connections_reading int64', int(match3.group(1))\n print 'metric connections_writing int64', int(match3.group(2))\n print 'metric connections_waiting int64', int(match3.group(3))\n print 'status ok succeeded in obtaining nginx status metrics.'\n else:\n print 'status err failed to obtain nginx status metrics.'\n sys.exit(1)",
"def test_gauge(self):\n # Create a metrics with no metric instances\n mf = pmp.utils.create_metric_family(\n self.gauge_metric_name, self.gauge_metric_help, self.gauge_metric_type, []\n )\n self.assertIsInstance(mf, pmp.MetricFamily)\n self.assertEqual(len(mf.metric), 0)\n\n # Create it with metrics\n mf = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n )\n self.assertIsInstance(mf, pmp.MetricFamily)\n self.assertEqual(mf.name, self.gauge_metric_name)\n self.assertEqual(mf.help, self.gauge_metric_help)\n self.assertEqual(mf.type, self.gauge_metric_type)\n\n # Create another and check equal\n mf_ = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n self.assertEqual(mf, mf_)\n\n for m in mf_.metric:\n self.assertEqual(m.timestamp_ms, 0)\n\n # Create another with timestamp\n mf_ = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n timestamp=True,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n for m in mf_.metric:\n self.assertNotEqual(m.timestamp_ms, 0)\n\n self.assertNotEqual(mf, mf_)\n\n # Create Gauge with const_labels\n mf_ = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n const_labels=self.const_labels,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n # Check that const_label is present in the LabelPair associated\n # with each metric instance.\n for m in mf_.metric:\n labels = [lp.name for lp in m.label]\n self.assertIn(\"app\", labels)\n\n self.assertNotEqual(mf, mf_)\n\n # Check Gauge can be round-tripped through encode and decode\n payload = pmp.encode(mf)\n self.assertIsInstance(payload, bytes)\n _mf = pmp.decode(payload)[0]\n self.assertEqual(mf, _mf)",
"def get_metric_annotation():\n annotations = {\n 'status': '# HELP {0} Current status of IB application.\\n\\\n# TYPE {0} gauge\\n'.format(get_metric_name('status'))}\n return annotations",
"def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))",
"def _create_gauge(self, name: str, attributes: Attributes = None):\n otel_safe_name = _get_otel_safe_name(name)\n key = _generate_key_name(name, attributes)\n\n gauge = self.meter.create_observable_gauge(\n name=otel_safe_name,\n callbacks=[partial(self.read_gauge, _generate_key_name(name, attributes))],\n )\n self.map[key] = Observation(DEFAULT_GAUGE_VALUE, attributes)\n\n return gauge",
"def test_metrics_prometheus(self, master_ar_process):\n url = master_ar_process.make_url_from_path('/nginx/metrics')\n\n resp = requests.get(\n url,\n allow_redirects=False,\n )\n\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'text/plain'\n assert resp.text.startswith('# HELP nginx_vts_info Nginx info')",
"def replication_info():\n\n def _get_last_packet_name(location, pattern):\n try:\n entries = [os.path.join(location, e) for e in os.listdir(location)]\n except OSError as e:\n logging.warning(e)\n return None\n pattern = re.compile(pattern)\n entries = filter(lambda x: pattern.search(x), entries)\n entries = filter(os.path.isfile, entries)\n entries = _sort_natural(entries, reverse=True) # latest first\n return os.path.split(entries[0])[-1] if entries else None\n\n # TODO(roman): Cache this response:\n return jsonify({\n 'last_packet': _get_last_packet_name(\n current_app.config['REPLICATION_PACKETS_DIR'],\n \"replication-[0-9]+.tar.bz2$\"\n ),\n })",
"def update_gauge(self):\n gauge_metrics = self._fetch_gauge_metrics_and_clear()\n self._logger.info('update_gauge. gauge_metrics = %s',\n build_metrics_gauge_data(gauge_metrics))",
"def plot_metrics(self, instance_type, directory='.'):\n max_mem_utilization_percent_chunks_all_pts = []\n max_mem_used_MB_chunks_all_pts = []\n min_mem_available_MB_chunks_all_pts = []\n max_cpu_utilization_percent_chunks_all_pts = []\n max_disk_space_utilization_percent_chunks_all_pts = []\n max_disk_space_used_GB_chunks_all_pts = []\n for i in range(0, self.nTimeChunks):\n self.starttime = self.starttimes[i]\n self.endtime = self.endtimes[i]\n # saving all points for the chunck\n max_mem_utilization_percent_chunks_all_pts.append(self.max_memory_utilization_all_pts())\n max_mem_used_MB_chunks_all_pts.append(self.max_memory_used_all_pts())\n min_mem_available_MB_chunks_all_pts.append(self.min_memory_available_all_pts())\n max_cpu_utilization_percent_chunks_all_pts.append(self.max_cpu_utilization_all_pts())\n max_disk_space_utilization_percent_chunks_all_pts.append(self.max_disk_space_utilization_all_pts())\n max_disk_space_used_GB_chunks_all_pts.append(self.max_disk_space_used_all_pts())\n # writing values as tsv\n input_dict ={\n 'max_mem_used_MB': (max_mem_used_MB_chunks_all_pts, 1),\n 'min_mem_available_MB': (min_mem_available_MB_chunks_all_pts, 1),\n 'max_disk_space_used_GB': (max_disk_space_used_GB_chunks_all_pts, 1),\n 'max_mem_utilization_percent': (max_mem_utilization_percent_chunks_all_pts, 1),\n 'max_disk_space_utilization_percent': (max_disk_space_utilization_percent_chunks_all_pts, 1),\n 'max_cpu_utilization_percent': (max_cpu_utilization_percent_chunks_all_pts, 5)\n }\n self.list_files.append(self.write_tsv(directory, **input_dict))\n self.list_files.append(self.write_metrics(instance_type, directory))\n # writing html\n self.list_files.append(self.write_html(instance_type, directory))",
"def define_gauge_metric(registry, metric_obj):\n labels_map = metric_obj.get(\"labels\", {})\n labels = labels_map.keys()\n gauge = Gauge(\n name=metric_obj.get(\"metric_name\"),\n documentation=metric_obj.get(\"description\"),\n registry=registry,\n labelnames=labels,\n )\n return gauge, labels_map",
"def get_server_status():\n result = {}\n cpu_index = \"metriccpumetricsets\" + \"-\" + datetime.datetime.now().strftime('%Y%m%d')\n memory_index = \"metricmemorymetricsets\" + \"-\" + datetime.datetime.now().strftime('%Y%m%d')\n process_index = \"metricprocessmetricsets\" + \"-\" + datetime.datetime.now().strftime('%Y%m%d')\n diskio_index = \"metricdiskiometricsets\" + \"-\" + datetime.datetime.now().strftime('%Y%m%d')\n network_index = \"metricnetworkmetricsets\" + \"-\" + datetime.datetime.now().strftime('%Y%m%d')\n\n\n result['used_cpu_pct'] = get_cpu_pct(cpu_index) # cpu 利用率\n result['used_mem_pct'] = get_memory_pct(memory_index) # 内存利用率\n result['used_swap_pct'] = get_swap_pct(memory_index) # swap 利用率\n result['process_nums'] = get_process_num(process_index) # 进程数量\n\n res = get_diskio(diskio_index)\n result['disk_write'] = res[\"disk_write\"] # 磁盘写入速度\n result['disk_read'] = res[\"disk_read\"] # 磁盘读取速度\n result['read_write_count'] = res['read_write_count'] # 读写数量\n result['vda'] = res[\"disk_write\"] # vda 目前定义为磁盘写入速度\n\n res = get_network(network_index)\n result['network_in'] = '%.2f' % res['network_in'] # 网络流入\n result['network_out'] = '%.2f' % res['network_out'] # 网络流出\n result['data_second'] = '%.2f' % (res['network_in'] + res['network_out']) # 流量实时统计定义为网络流入和流出速度和\n return result",
"def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )",
"def generate_latest(registry=Registry):\n\n def sample_line(line, metric_type):\n if line.labels:\n labelstr = '{{{0}}}'.format(','.join(\n ['{0}=\"{1}\"'.format(\n k, v.replace('\\\\', r'\\\\').replace('\\n', r'\\n').replace('\"', r'\\\"'))\n for k, v in sorted(line.labels.items())]))\n else:\n labelstr = ''\n timestamp = ''\n if line.timestamp is not None:\n # Convert to milliseconds.\n timestamp = ' {0:d}'.format(int(float(line.timestamp) * 1000))\n name = line.name\n if metric_type == 'counter' and name.endswith('_total'):\n name = name[:-6]\n return '{0}{1} {2}{3}\\n'.format(\n name, labelstr, int(line.value), timestamp)\n\n output = []\n for metric in registry.collect():\n try:\n mname = metric.name\n mtype = metric.type\n # Munging from OpenMetrics into Prometheus format.\n if mtype == 'counter':\n mname = mname\n elif mtype == 'info':\n mname = mname + '_info'\n mtype = 'gauge'\n elif mtype == 'stateset':\n mtype = 'gauge'\n elif mtype == 'gaugehistogram':\n # A gauge histogram is really a gauge,\n # but this captures the structure better.\n mtype = 'histogram'\n elif mtype == 'unknown':\n mtype = 'untyped'\n help_str = '# HELP {0} {1}\\n'.format(mname, metric.documentation.replace('\\\\', r'\\\\').replace('\\n', r'\\n'))\n if 'Multiprocess' not in help_str:\n continue\n output.append('# HELP {0} {1}\\n'.format(\n mname, metric.documentation.replace('\\\\', r'\\\\').replace('\\n', r'\\n')))\n output.append('# TYPE {0} {1}\\n'.format(mname, mtype))\n\n for s in metric.samples:\n for suffix in ['_created', '_gsum', '_gcount']:\n if s.name == metric.name + suffix:\n break\n else:\n line = sample_line(s, mtype)\n if not line:\n continue\n output.append(line)\n except Exception as exception:\n exception.args = (exception.args or ('',)) + (metric,)\n raise\n\n return ''.join(output).encode('utf-8')",
"def update_misc_graphs(self):\n \n def load_from_gcs(BUCKET_NAME, filename):\n return pd.read_csv(\"gcs://\" + BUCKET_NAME + '/' + filename)\n PROJECT_ID = 'hawkfish-prod-0c4ce6d0'\n bluelabs, survey_monkey = pd.read_csv(\"gcs://gabriel_bucket_test/bluelabs_superset.csv\"), \\\n pd.read_csv(\"gcs://gabriel_bucket_test/agg_surveymonkey_data.csv\")\n \n survey_monkey['candidates'] = survey_monkey['name_first_choice_candidates']\n survey_monkey.loc[survey_monkey.name_first_choice_candidates == 'None of the above', 'candidates'] = 'Other'\n survey_monkey.loc[survey_monkey.name_first_choice_candidates == 'No Answer', 'candidates']= 'Undecided'\n \n answered = survey_monkey[['date','source_id']].append(bluelabs[['date', 'source_id']])\n completed = survey_monkey[survey_monkey.response_status=='completed'][['date',\n 'source_id']].append(bluelabs[bluelabs.response_status=='completed'][['date', 'source_id']])\n\n temp = answered.groupby(['date'])['source_id'].value_counts().to_frame()\n temp = temp.rename(columns={'source_id':'answered_counts'}).reset_index()\n\n survey_counts = completed.groupby(['date'])['source_id'].value_counts().to_frame()\n\n survey_counts = survey_counts.rename(columns={'source_id':'completed_counts'}).reset_index()\n survey_counts = survey_counts.merge(temp, on=['date', 'source_id'], how='outer')\n\n\n # calculate totals for both dashboard\n temp = answered.date.value_counts().to_frame().reset_index()\n temp = temp.rename(columns={'date':'answered_counts', \n 'index':'date'})\n temp_2 = completed.date.value_counts().to_frame().reset_index()\n temp_2 = temp_2.rename(columns={'date':'completed_counts', \n 'index':'date'})\n temp_2 = temp_2.merge(temp, on=['date'], how='outer')\n temp_2['source_id'] = 'totals'\n\n survey_counts = survey_counts.append(temp_2)\n \n bluelabs['candidates'] = bluelabs['name_first_choice_candidates']\n combo_df = survey_monkey[['candidates', 'date','source_id']].append(bluelabs[['candidates','date', 'source_id']])\n \n support_df = combo_df[combo_df.candidates.notna()]\n candidates = list(support_df.candidates.unique())\n\n final_dict = {}\n\n for x in candidates:\n temp = support_df[support_df.candidates==x]\n temp_2 = temp.groupby(['date', \n 'source_id'])['candidates'].count()/support_df.groupby(['date', \n 'source_id'])['candidates'].count()\n final_dict[x] = temp_2\n\n final_df = pd.DataFrame(final_dict).reset_index()\n final_df = final_df.fillna(0)\n \n totals_df = pd.DataFrame()\n for x in candidates:\n temp = support_df[support_df.candidates==x]\n temp_2 = temp.groupby(['date'])['source_id'].count()/support_df.groupby(['date'])['source_id'].count()\n totals_df[x] = temp_2 \n totals_df = totals_df.fillna(0)\n totals_df = totals_df.reset_index()\n totals_df['source_id'] ='totals'\n final_df = final_df.append(totals_df)\n \n final_df = final_df.merge(survey_counts, on=['date', 'source_id'], how='outer')\n final_df = final_df.fillna(0)\n \n turnout_df = survey_monkey[['turnout', \n 'date','source_id']].append(bluelabs[['turnout','date', 'source_id']])\n \n turnout_df.turnout = turnout_df.turnout.fillna('No answer')\n responses = list(turnout_df.turnout.unique())\n\n x = 'No answer'\n temp = turnout_df.groupby(['date', \n 'source_id'])['turnout'].value_counts()/turnout_df.groupby(['date', \n 'source_id'])['turnout'].count()\n temp = temp.to_frame()\n temp = temp.rename(columns={'turnout':'turnout_percentage'})\n temp = temp.reset_index()\n\n totals_turnout = turnout_df.groupby(['date'])['turnout'].value_counts()/\\\n 
turnout_df.groupby(['date'])['turnout'].count()\n totals_turnout = totals_turnout.to_frame()\n totals_turnout = totals_turnout.rename(columns={'turnout':'turnout_percentage'})\n totals_turnout = totals_turnout.reset_index()\n totals_turnout['source_id'] = 'totals'\n\n totals_turnout = totals_turnout.append(temp)\n totals_turnout['turnout_percentage'] = totals_turnout['turnout_percentage'] * 100\n\n final_df = final_df.merge(totals_turnout, on=['date','source_id'], how='outer')\n \n final_df[candidates] = final_df[candidates].astype(float)\n final_df.loc[final_df.source_id=='bluelabs', 'source_id']='BLUELABS'\n final_df.loc[final_df.source_id=='survey_monkey', 'source_id']='SURVEY MONKEY'\n final_df['date'] = final_df['date'].apply(lambda x: '-'.join(['20'+ x.split('/')[2], \n x.split('/')[0],\n x.split('/')[1]]) if '/' in x else x)\n final_df['date'] = pd.to_datetime(final_df['date'])\n\n final_df['test'] = 'test'\n final_df['qturnout'] = final_df['turnout']\n tf = pd.read_csv(\"gcs://togzhan_bucket/survey_dashboard/combined_support_test.csv\")\n final_df = final_df[list(tf.columns)]\n\n storage_client = storage.Client(project=PROJECT_ID)\n bucket = storage_client.get_bucket(\"gabriel_bucket_test\")\n print('-' * 20)\n print(\"Saving combined support dataset..\")\n blob = bucket.blob(\"combined_support_test2.csv\")\n blob.upload_from_string(final_df.to_csv(index=False))\n \n phone_types = load_from_gcs('user_ground_truth',\n 'additional_data/prim_march_20191130_supplement_sample_for_vendor_phonetype.csv')\n\n phone_types_2 = load_from_gcs('user_ground_truth',\n 'additional_data/prim_march_20191130_sample_for_vendor_bilingual_phonetype.csv')\n\n phone_types_3 = load_from_gcs('user_ground_truth',\n 'additional_data/prim_march_20191130_sample_for_vendor_english_phonetype.csv')\n\n\n phone_types = phone_types.append(phone_types_2).append(phone_types_3)\n\n # changing the labels, so it's more understandable\n phone_types.loc[phone_types.phone_type=='L', 'phone_type'] = \"Landline\"\n phone_types.loc[phone_types.phone_type=='C', 'phone_type'] = \"Cell\"\n\n #merge back with bluelabs data\n bluelabs = bluelabs.merge(phone_types, left_on='respondents_id', \n right_on='voterbase_id', how='left')\n \n \n # count support for all candidates for all phone types\n bl_support_df = bluelabs[bluelabs.candidates.notna()]\n bl_final_dict = {}\n for x in candidates:\n # subset for this candidate only\n temp = bl_support_df[bl_support_df.candidates==x]\n temp_2 = temp.groupby(['date', 'source_id'])['candidates'].count()/\\\n bl_support_df.groupby(['date', 'source_id'])['candidates'].count()\n bl_final_dict[x] = temp_2\n\n bl_final_df = pd.DataFrame(bl_final_dict).reset_index()\n bl_final_df = bl_final_df.drop(columns=['source_id'])\n bl_final_df['phone_type'] ='totals' \n \n # count support by candidates but now for phone type\n bl_phone_dict = {}\n for x in candidates:\n temp = bl_support_df[bl_support_df.candidates==x]\n temp_2 = temp.groupby(['date', 'phone_type'])['candidates'].count()/\\\n bl_support_df.groupby(['date', 'phone_type'])['candidates'].count()\n bl_phone_dict[x] = temp_2\n\n bl_phone_df = pd.DataFrame(bl_phone_dict).reset_index()\n \n bl_final_df = bl_final_df.append(bl_phone_df)\n bl_final_df = bl_final_df.sort_values(by='date')\n bl_final_df[candidates]= bl_final_df[candidates]*100\n bl_final_df = bl_final_df.fillna(0)\n \n def saving_to_gcs(filepath, df, bucket_name):\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(filepath)\n 
blob.upload_from_string(df.to_csv(index=False))\n \n bucket_name = 'gabriel_bucket_test'\n filepath = \"bluelabs_support2.csv\"\n print('-' * 20)\n print(\"Saving bl support dataset..\")\n saving_to_gcs(filepath, bl_final_df, bucket_name)\n \n sm_support_df = survey_monkey\n\n sm_final_dict = {}\n for x in candidates:\n temp = sm_support_df[sm_support_df.candidates==x]\n temp_2 = temp.groupby(['date', 'source_id'])['candidates'].count()/\\\n sm_support_df.groupby(['date', 'source_id'])['candidates'].count()\n sm_final_dict[x]=temp_2\n\n sm_final_df = pd.DataFrame(sm_final_dict).reset_index()\n sm_final_df = sm_final_df.sort_values(by=['date'])\n sm_final_df = sm_final_df.drop(columns=['source_id'])\n # sm_final_df['qturnout'] = 'totals'\n sm_final_df = sm_final_df.fillna(0)\n \n sm_final_df[candidates] = sm_final_df[candidates]*100\n bucket_name = 'gabriel_bucket_test'\n print('-' * 20)\n print(\"Saving sm support dataset..\")\n filepath = \"sm_support2.csv\"\n saving_to_gcs(filepath, sm_final_df, bucket_name)",
"def store_reported_location(fileName, logNames):\n\n obj_name_mapping = {}\n\n for name_str in logNames:\n\n gcl_name = gdp.GDP_NAME(name_str)\n gcl_handle = gdp.GDP_GCL(gcl_name, gdp.GDP_MODE_RO)\n obj_name_mapping[gcl_handle] = name_str\n gcl_handle.subscribe(0, 0, None)\n\n while True:\n event = gdp.GDP_GCL.get_next_event(None)\n timestamp_end = time.time()\n datum = event[\"datum\"]\n gcl_name = obj_name_mapping[event[\"gcl_handle\"]]\n data = datum[\"data\"]\n timestamp_start = float(json.loads(data)['timestamp_start'])\n print gcl_name + str(': ') + 'New location information received.'\n print 'Latency: ' + str(timestamp_end - timestamp_start)\n string = gcl_name + ',' + str(timestamp_end) + ',' + data + '\\n'\n with open(fileName, 'a') as the_file:\n the_file.write(string)",
"async def plot_gauge(comfort_level : list = []):\n lietner_img = leitner_bar(comfort_level)\n return lietner_img"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds and sends an embed message with new commits information. | async def process_push_hook(push: models.PushHook):
repository = push.repository
project = push.project
commit_str = "commit" if push.total_commits_count == 1 else "commits"
# Show link to commit compare if there's more than one commit
if push.total_commits_count > 1:
embed_url = f"{repository.homepage}/compare/{push.before[:7]}...{push.after[:7]}"
else:
embed_url = f"{repository.homepage}/commit/{push.after[:7]}"
if push.before == EMPTY_COMMIT:
embed = discord.Embed(title=f"[{project.namespace}/{project.name}] New branch created {push.branch}",
url=embed_url, colour=discord.Colour.light_grey())
embed.set_author(name=push.user_name, icon_url=push.user_avatar)
await send_message(None, embed=embed, avatar_url=push.project.avatar_url)
elif push.after == EMPTY_COMMIT:
embed = discord.Embed(title=f"[{project.namespace}/{project.name}] Branch deleted {push.branch}",
url=embed_url, colour=discord.Colour.light_grey())
embed.set_author(name=push.user_name, icon_url=push.user_avatar)
await send_message(None, embed=embed, avatar_url=push.project.avatar_url)
# If there are no commits, do not show a message
if not push.total_commits_count:
return
embed = discord.Embed(title=f"[{project.namespace}/{project.name}:{push.branch}] "
f"{push.total_commits_count} new {commit_str}",
url=embed_url, colour=discord.Colour.blurple())
embed.set_author(name=push.user_name, icon_url=push.user_avatar)
embed.description = ""
for commit in push.commits:
message = commit.message.splitlines()[0]
embed.description += f"[`{commit.id[:7]}`]({commit.url}) {message} - {commit.author.name}\n"
print("Sending push message")
await send_message(None, embed=embed, avatar_url=push.project.avatar_url) | [
"def command(self, bot, comm, groups):\n commit_message = self.plugin.get_commit_message()\n bot.reply(comm, u'{user}: {msg}', kwvars={'msg': commit_message})",
"def commit(self, msg=None):\n self.log.debug(\"committing in git: %s\" % msg)\n completemsg = \"EasyBuild-commit from %s (time: %s, user: %s) \\n%s\" % (socket.gethostname(),\n time.strftime(\"%Y-%m-%d_%H-%M-%S\"),\n getpass.getuser(),\n msg)\n self.log.debug(\"git status: %s\" % self.client.status())\n try:\n self.client.commit('-am \"%s\"' % completemsg)\n self.log.debug(\"succesfull commit\")\n except GitCommandError, err:\n self.log.warning(\"Commit from working copy %s (msg: %s) failed, empty commit?\\n%s\" % (self.wc, msg, err))\n try:\n info = self.client.push()\n self.log.debug(\"push info: %s \" % info)\n except GitCommandError, err:\n self.log.warning(\"Push from working copy %s to remote %s (msg: %s) failed: %s\" % (self.wc,\n self.repo,\n msg,\n err))",
"async def fetch_commits(self):\n for repo in self.config['repos'].split(','):\n since = datetime.min\n async for msg in self.channel.history(limit=None):\n if not msg.embeds:\n continue\n e = msg.embeds[0]\n if e.title == 'github commit' and e.timestamp and repo in e.description: # type: ignore\n since = e.timestamp\n break\n \n await self.update_commit_activity(repo, since)",
"def build_commit_msg(author, reviewers, source_branch, target_branch,\n commit_message, mp_web_link):\n return \"Merge {} into {} [a={}] [r={}]\\n\\n{}\\n\\nMP: {}\".format(\n source_branch, target_branch, author,\n reviewers, commit_message, mp_web_link)",
"def commits() -> None:\n project = get_project(require=True)\n commits_data = request('get', f'/api/v0/projects/{project.id}/commits/').json()\n current_commit = None\n try:\n current_commit = get_current_commit(project.directory)\n except Exception:\n pass\n\n # Filter out ad-hoc executions (and remove the adhocness marker)\n commits_data = [commit for commit in commits_data if not commit.pop('adhoc', False)]\n\n # Mark the current commit\n for commit in commits_data:\n if commit['identifier'] == current_commit:\n commit['identifier'] += ' (current)'\n\n print_table(commits_data)",
"def commit_details(build_id): \n build = history.fetch(build_id)\n return render_template('build.html', build=build)",
"def commit(self):\n run('git', 'add', '.')\n run('git', 'commit', '-a', '-m', 'updates')",
"def get_github_commits():\n utcnow = datetime.datetime.utcnow()\n yesterday = utcnow - datetime.timedelta(hours=24)\n yesterday = yesterday.replace(hour=12, minute=0, second=0)\n iso = yesterday.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n txt = [\"> IEM Code Pushes <to branch> on Github\\n\"]\n html = [\"<h3>IEM Code Pushes <to branch> on Github</h3>\"]\n\n # get branches, main is first!\n branches = [\"main\"]\n req = exponential_backoff(requests.get, IEM_BRANCHES, timeout=30)\n for branch in req.json():\n if branch[\"name\"] == \"main\":\n continue\n branches.append(branch[\"name\"])\n\n hashes = []\n links = []\n for branch in branches:\n uri = (\n f\"https://api.github.com/repos/akrherz/iem/commits?since={iso}&\"\n f\"sha={branch}\"\n )\n req2 = exponential_backoff(requests.get, uri, timeout=30)\n # commits are in reverse order\n for commit in req2.json()[::-1]:\n if commit[\"sha\"] in hashes:\n continue\n hashes.append(commit[\"sha\"])\n timestring = commit[\"commit\"][\"author\"][\"date\"]\n utcvalid = datetime.datetime.strptime(\n timestring, \"%Y-%m-%dT%H:%M:%SZ\"\n )\n valid = utcvalid.replace(tzinfo=pytz.utc).astimezone(\n pytz.timezone(\"America/Chicago\")\n )\n data = {\n \"stamp\": valid.strftime(\"%b %-d %-2I:%M %p\"),\n \"msg\": commit[\"commit\"][\"message\"],\n \"htmlmsg\": htmlize(commit[\"commit\"][\"message\"])\n .replace(\"\\n\\n\", \"\\n\")\n .replace(\"\\n\", \"<br />\\n\"),\n \"branch\": branch,\n \"url\": commit[\"html_url\"][:-20], # chomp to make shorter\n \"i\": len(links) + 1,\n }\n links.append(\"[%(i)s] %(url)s\" % data)\n txt.append(\n mywrap(\" %(stamp)s[%(i)s] <%(branch)s> %(msg)s\" % data)\n )\n html.append(\n (\n '<li><a href=\"%(url)s\">%(stamp)s</a> '\n \"<%(branch)s> %(htmlmsg)s</li>\\n\"\n )\n % data\n )\n\n if len(txt) == 1:\n txt = txt[0] + \" No code commits found in previous 24 Hours\"\n html = html[0] + (\n \"<strong>No code commits found \" \"in previous 24 Hours</strong>\"\n )\n else:\n txt = \"\\n\".join(txt) + \"\\n\\n\" + \"\\n\".join(links)\n html = html[0] + \"<ul>\" + \"\\n\".join(html[1:]) + \"</ul>\"\n\n return txt + \"\\n\\n\", html + \"<br /><br />\"",
"def main():\n logging.basicConfig(stream=sys.stdout, level=logging.ERROR)\n logger = logging.getLogger('git-commits2slack')\n args = parse_args()\n if args.verbosity:\n logger.setLevel(logging.DEBUG)\n\n try:\n slack_token = rss2slack.get_slack_token()\n\n if not os.path.isdir(args.git_clone_dir):\n git_clone(args.git_clone_dir, args.git_repo)\n\n os.chdir(args.git_clone_dir)\n commit_ref = git_pull(args.git_clone_dir)\n if not commit_ref:\n logger.info('No new commits.')\n sys.exit(0)\n\n commits = git_show(args.git_clone_dir, commit_ref)\n if not commits:\n # FIXME(zstyblik): error? send message to Slack?\n logger.warning('There should be new commits, but we have none.')\n sys.exit(0)\n\n repo_name = os.path.basename(args.git_clone_dir)\n branch_name = git_branch(args.git_clone_dir)\n commit_count = len(commits)\n\n msg_blocks = [\n format_commit_message(args.git_web, commit[0], commit[1])\n for commit in commits\n ]\n\n heading = format_heading(\n args.git_web, branch_name, repo_name, commit_count\n )\n msg_blocks.insert(0, heading)\n\n slack_client = rss2slack.get_slack_web_client(\n slack_token, args.slack_base_url, args.slack_timeout\n )\n rss2slack.post_to_slack(\n logger, msg_blocks, slack_client, args.slack_channel,\n )\n except Exception:\n logger.debug(traceback.format_exc())\n # TODO(zstyblik):\n # 1. touch error file\n # 2. send error message to the channel\n finally:\n sys.exit(0)",
"async def build_embed(cls, entity, client, event, message_jump_url, detailed):\n if detailed:\n await cls.update_entity_details(entity, client)\n embed = cls.build_embed_detailed(entity)\n extra_fields_set = copy_extra_fields(embed, event)\n if extra_fields_set:\n return embed\n \n else:\n embed = create_base_embed(entity, None if entity.url is None else 'Click to open')\n \n add_embed_footer(embed, entity)\n add_embed_author(embed, event, cls.name, message_jump_url)\n return embed",
"def create_commit(self, event_data_yaml):\n os.chdir(str(self.repository_path))\n sh.git.checkout(self.branch)\n sh.git.add(self.event_dir)\n message_body = (\n '\\n\\nEvent config:\\n~~~yaml\\n{}\\n~~~\\n'.format(event_data_yaml)\n + '\\nScraped with [pyvideo_scrape]'\n + '(https://github.com/pyvideo/pyvideo_scrape)')\n if self.minimal_download:\n message = ('Minimal download: '\n + '{}\\n\\nMinimal download executed for #{}'.format(\n self.title, self.issue)\n + '\\n\\nOnly data that needs [no review](https://'\n + 'github.com/pyvideo/pyvideo_scrape#use-cases) was scraped.'\n + '\\nThis event needs further scraping and human '\n + 'reviewing for the description and other data to show.'\n + message_body)\n sh.git.commit('-m', message)\n sh.git.push('--set-upstream', 'origin', self.branch)\n # ~ sh.git.push('--set-upstream', '--force', 'origin', self.branch)\n sh.git.checkout('master')\n else:\n message = (\n 'Scraped {}\\n\\nFixes #{}'.format(self.branch, self.issue)\n + message_body)\n sh.git.commit('-m', message)\n sh.git.checkout('master')\n logger.debug('Conference {} commited', self.branch)",
"async def update_embed(self) -> None:\n\n self.embed = build_actions_embed(LoggingActions.all_enabled_actions(self.bits))\n await self.message.edit(embed=self.embed)",
"def post(self):\n my_data = json.loads(self.request.body.decode('utf-8'))\n top_repo_path = my_data[\"top_repo_path\"]\n commit_msg = my_data[\"commit_msg\"]\n my_output = self.git.commit(commit_msg, top_repo_path)\n self.finish(my_output)",
"def git_commit_leanpub(msg):\n with pushd(leanpub_repo):\n os.system(f\"\"\"git commit -a -m \"{msg}\" \"\"\")\n os.system(\"git push\")",
"def cmd_commit(message):\n return ['git', 'commit', '-m', message]",
"def ExecuteEmbed(self):\r\n \r\n Embed = DiscordEmbed(title=\"Test Title 123\", \r\n description=\"Test Description 321\",\r\n color=\"eb5e34\") \r\n Embed.set_timestamp()\r\n \r\n self.WEBHOOK.add_embed(Embed)\r\n Execute = self.WEBHOOK.execute()",
"async def on_submit(self, interaction: Interaction) -> None:\n await interaction.response.defer()\n await self.embed_view.update_embed(self.message, content=self.content.value)",
"def add_commit(self, cl, last):\n with self.perf.timer[OVERALL]:\n with self.perf.timer[BUILD]:\n self.__append(\"commit refs/heads/{0}\\n\".format(self.branchname))\n self.__append(\"mark : {0}\\n\".format(cl.change))\n impidx = cl.description.find(p4gf_const.P4GF_IMPORT_HEADER)\n committer_added = False\n if impidx > -1:\n # extract the original author and committer data;\n # note that the order matters with fast-import\n suffix = cl.description[impidx:]\n for key in ('author', 'committer'):\n regex = re.compile(key.capitalize() + r': (.+) (<.+>) (\\d+) (.+)')\n match = regex.search(suffix)\n if match:\n self.__append(\"{key} {fullname} {email} {time} {timezone}\\n\".\n format(key=key,\n fullname=match.group(1),\n email=match.group(2),\n time=match.group(3),\n timezone=match.group(4)))\n committer_added = True\n # prune Git Fusion noise added in p4gf_copy_to_p4\n # (including the newline added between the parts)\n desc = cl.description[0:impidx-1]\n\n # Convoluted logic gates but avoids duplicating code. The point\n # is that we add the best possible committer data _before_\n # adding the description.\n if not committer_added:\n if impidx > -1:\n # old change description that lacked detailed author info,\n # deserves a warning, but otherwise push onward even if the\n # commit checksums will likely differ from the originals\n LOG.warn('commit description did not match committer regex: @{} => {}'.\n format(cl.change, suffix))\n self.__append(\"committer {fullname} {email} {time} {timezone}\\n\".\n format(fullname=self.__full_name_for_user(cl.user),\n email=self.__email_for_user(cl.user),\n time=cl.time,\n timezone=self.timezone))\n desc = cl.description\n self.__add_data(desc)\n\n #if this is not the initial commit, say what it's based on\n #otherwise start with a clean slate\n if last:\n #last is either SHA1 of an existing commit or mark of a commit\n #created earlier in this import operation. Assume a length of\n #40 indicates the former and mark ids will always be shorter.\n if len(last) == 40:\n self.__append(\"from {0}\\n\".format(last))\n else:\n self.__append(\"from :{0}\\n\".format(last))\n else:\n self.__append(\"deleteall\\n\")\n self.__add_files(cl.files)",
"def commit_cm_changes(status):\n if status.cm_changes:\n message = input(('Please enter commit message for the {} staged files (or return to skip):'\n ' ').format(status.cm_changes))\n if message:\n if run_command(['git', 'commit', '-m', message], cwd=CM_WORKING_DIR, uid=CM_OWNER_UID):\n print('Now please push this commit to the upstream on the local server')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds and sends an embed message with notes information. | async def process_note_hook(data: models.NoteHook):
note = data.note
user = data.user
project = data.project
colour = discord.Colour.greyple()
embed = discord.Embed(url=note.url, description=note.description, colour=colour)
embed.set_author(name=user.username, icon_url=user.avatar_url)
if data.issue:
issue = data.issue
embed.title = f"[{project.namespace}/{project.name}] New comment on issue #{issue.iid}: {issue.title}"
if data.commit:
commit = data.commit
embed.title = f"[{project.namespace}/{project.name}] New comment on commit `{commit.id[:7]}`"
if data.merge_request:
merge = data.merge_request
embed.title = f"[{project.namespace}/{project.name}] New comment on merge request !{merge.iid}: {merge.title}"
await send_message(None, embed=embed) | [
"def CreateNote(self):",
"async def note(self, ctx):\n note_embed = discord.Embed(color=discord.Color.blurple())\n note_embed.add_field(name=\"__**Please Note**__\", value=RULES_NOTE)\n await ctx.send(embed=note_embed)",
"async def build_embed(cls, entity, client, event, message_jump_url, detailed):\n if detailed:\n await cls.update_entity_details(entity, client)\n embed = cls.build_embed_detailed(entity)\n extra_fields_set = copy_extra_fields(embed, event)\n if extra_fields_set:\n return embed\n \n else:\n embed = create_base_embed(entity, None if entity.url is None else 'Click to open')\n \n add_embed_footer(embed, entity)\n add_embed_author(embed, event, cls.name, message_jump_url)\n return embed",
"def show_release_notes(self, data):\n notes = json.load(open(RELEASE_NOTES))\n message = ''\n for version, notes in notes.items():\n message += '\\n\\n**' + version + '**\\n\\n' + notes\n self.create_message(\n message,\n roomId=data['roomId']\n )",
"async def build_links_embed():\n embed = helper.embed_builder(\n bot.user.name,\n \"Hello, my friend! I am Valron. My wife has compiled a \"\n + \"list of helpful links for you.\",\n )\n embed.add_field(\n name=\"Invite me to your server with this link\",\n value=f\"[Click me!]({constants.DISCORD_INVITE_LINK})\",\n inline=False,\n )\n embed.add_field(\n name=\"Find out what's new with me from the support discord server\",\n value=f\"[Click me!]({constants.SUPPORT_SERVER_LINK})\",\n inline=False,\n )\n embed.add_field(\n name=\"See how I was made\",\n value=f\"[Click me!]({constants.GITHUB_LINK})\",\n inline=False,\n )\n embed.add_field(\n name=\"Want to support me and my wife?\",\n value=f\"Click any of these: [PayPal]({constants.PAYPAL_LINK}) \"\n + f\"| [Ko-Fi]({constants.KOFI_LINK}) | [GCash]({constants.GCASH_QR_CODE})\",\n inline=False,\n )\n embed.set_footer(\n text=\"/help - main help command\\n\"\n + \"/options - to see the list of supported classes and HP modifiers\"\n )\n\n return embed",
"async def makemodnote(self, ctx, user: MemberOrID, *, note: str):\n\n self.insert(\n author_id=ctx.author.id,\n subject_id=user.id,\n note=note,\n guild_id=ctx.guild.id,\n )\n await ctx.tick()",
"async def sample_embed(self, ctx: ct.ctxType):\n embed = discord.Embed(\n title=\"Sample Embed\",\n url=\"https://youtu.be/dQw4w9WgXcQ\",\n description=\"This is a sample embed.\",\n colour=discord.Colour.dark_blue(),\n )\n embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar.url)\n\n embed.set_thumbnail(url=ctx.author.avatar.url)\n\n embed.add_field(\n name=\"Field1\", value=\"Value under Field1, inline=False\", inline=False\n )\n embed.add_field(\n name=\"Field2\", value=\"Value under Field2, inline=True\", inline=True\n )\n embed.add_field(\n name=\"Field3\", value=\"Value under Field3, inline=True\", inline=True\n )\n\n embed.set_footer(\n text=f\"Requested by {ctx.author.name}\", icon_url=ctx.author.avatar.url\n )\n\n await ctx.reply(embed=embed)",
"async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begin:\", value=event.begin.format(\"HH:mm:ss ZZZ\") + \" (\" + event.begin.humanize() + \")\", inline=False)\n e.add_field(name=\"Duration: \", value=str(event.duration), inline=False)\n #e.add_field(name=\"Link\", value=info.get(\"url\"), inline=False)\n e.set_image(url=info.get(\"thumbnail\") or e.Empty)\n return e",
"async def _view_note(self, ctx: Context, number: int):\n\n author = ctx.author\n\n embed_links = ctx.channel.permissions_for(ctx.guild.me).embed_links\n\n author_str = f\"{author.name}'\"\n\n if author.name[-1].lower() != \"s\":\n author_str += \"s\"\n\n async with self.config.member(author).notes() as notes:\n try:\n note = notes[number-1]\n except IndexError:\n return await ctx.send(\n _(\"Note number {} not found.\").format(number)\n )\n\n msg_info = \"\"\n if note[\"author\"]:\n msg_info += _(\"**Author:** {}\").format(note[\"author\"])\n if note[\"channel\"]:\n msg_info += _(\"\\n**Channel:** {}\").format(note[\"channel\"])\n if note[\"jump_url\"]:\n if embed_links:\n msg_info += _(\n \"\\n[Click here to jump to message]({})\"\n ).format(note[\"jump_url\"])\n else:\n msg_info += _(\n \"\\n**Jump To Message:** {}\"\n ).format(note[\"jump_url\"])\n\n note_info = _(\n \"{}\\n\\n**Note:**\\n```{}```\\n**Reason:**\\n```{}```\"\n ).format(\n msg_info,\n note[\"note\"],\n note[\"reason\"]\n ).strip()\n\n if embed_links:\n page = discord.Embed(\n colour=0xff0000,\n description=note_info,\n title=_(\"{} TvM Note #{}\").format(author_str, number),\n timestamp=ctx.message.created_at\n )\n await ctx.send(embed=page)\n else:\n page = _(\n \"**{author} TvM Note #{number}**\"\n \"\\n\\n{note}\"\n ).format(\n author=author_str,\n number=number,\n note=note_info\n )\n await ctx.send(page)",
"def _build_notes(content):\n notes_title = nodes.subtitle(text=\"Notes:\")\n notes = nodes.bullet_list()\n\n content.append(notes_title)\n content.append(notes)\n\n for note in [\"This document is a continuous work in progress\"]:\n item = nodes.list_item()\n item.append(nodes.strong(text=note))\n notes.append(item)",
"def post(self):\n note = '%s %s' % (\n context.form.get('title'),\n context.form.get('content')\n )\n notes.append(note)\n return note",
"async def new(self, ctx):\n author_id = ctx.message.author.id\n record = await Mongo.get_record('embed', 'embed_owner', author_id)\n if record is None:\n upg = {\n \"embed_owner\": author_id,\n \"author\": \"\",\n \"description\": \"\",\n \"field_1\": \"\",\n \"name_1\": \"\",\n \"field_2\": \"\",\n \"name_2\": \"\",\n \"field_3\": \"\",\n \"name_3\": \"\",\n \"field_4\": \"\",\n \"name_4\": \"\",\n \"set_image\": \"\",\n \"footer\": \"\"\n }\n await Mongo.record_insert('embed', upg)\n await ctx.send(\"Создание вашего личного embed успешно.:white_check_mark:\")\n else:\n await ctx.send(\"У вас уже есть свои личный embed\\nВы можете использовать 'em clear', для его очистки.\")",
"def _get_body(self):\n\n bodyWrap = (\n u\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\"\n u\"<!DOCTYPE en-note SYSTEM \\\"http://xml.evernote.com/pub/enml2.dtd\\\">\"\n u\"<en-note>{body}</en-note>\"\n )\n att_enml = \"\\n\".join(self.embed_resources)\n\n return bodyWrap.format(body=att_enml)",
"def note(self):\n content = sys.argv[2]\n self.record('NOTE %s' % content)\n print('Note added')",
"def note(self, target, mask, args):\n if mask.is_channel:\n pubmsg = True\n else:\n pubmsg = False\n\n if args['<name>'] == self.bot.nick:\n self.msg(mask, target, \"You can't leave notes for me, silly :)\")\n return\n\n newmemo = Memo(\n {\n 'sender': target.nick.lower(),\n 'recipient': args['<name>'].lower(),\n 'public': pubmsg,\n 'timestamp': ctime(),\n 'text': ' '.join(args['<text>'])\n }\n )\n newmemo.save(self.db)\n self.db.commit()\n\n confirmation_msg = \"Your note for %s has been queued for delivery.\" % args['<name>']\n self.msg(mask, target, confirmation_msg)",
"def _build_about_embed(self) -> discord.Embed:\n with self.about_aoc_filepath.open(\"r\", encoding=\"utf8\") as f:\n embed_fields = json.load(f)\n\n about_embed = discord.Embed(title=self._base_url, colour=Colours.soft_green, url=self._base_url)\n about_embed.set_author(name=\"Advent of Code\", url=self._base_url)\n for field in embed_fields:\n about_embed.add_field(**field)\n\n about_embed.set_footer(text=f\"Last Updated (UTC): {datetime.utcnow()}\")\n\n return about_embed",
"def show_release_notes_for(self, data):\n target = re.search('show release notes for (.*)', self.message_text).group(1)\n raw_notes = json.load(open(RELEASE_NOTES))\n message = ''\n for version, notes in raw_notes.items():\n if version == target:\n message += '\\n\\n**' + version + '**\\n\\n' + notes\n break\n else:\n message = '\"' + str(target) + '\" is not a valid release. Please use one of:\\n\\n- ' + '\\n- '.join(raw_notes.keys())\n self.create_message(\n message,\n roomId=data['roomId']\n )",
"def send_note(name,donation,note):\n print()\n print(note.format(name,donation))\n print()",
"async def _make_embed_reddit_details(\n self, ctx: commands.Context, sub: str, name: str, emoji: str\n ):\n try:\n url, subr, author, title, post = await asyncio.wait_for(\n self._get_reddit_imgs_details(ctx, sub=sub), 3\n )\n except asyncio.TimeoutError:\n await ctx.send(\n \"Failed to get an image.\\n\"\n \"(Timeout error, it most likely means that Reddit API haven't returned images for 3 seconds)\"\n )\n return\n if not url:\n return\n em = \"\" # FIXME That thing is dumb.\n if url.endswith(GOOD_EXTENSIONS):\n em = await self._embed(\n color=await ctx.embed_colour(),\n title=(_(\"Here is {name} ... \") + emoji).format(name=name),\n description=(\n _(\n \"**Reddit Author:** {author}\\n**Title:** {title}\\n\"\n \"**[Link if you don't see image]({url})\\n[Link of Reddit post]({post})**\"\n )\n ).format(author=author, title=title, url=url, post=post),\n image=url,\n footer=_(\"Requested by {req} • From r/{r}\").format(\n req=ctx.author.display_name, r=subr\n ),\n )\n elif url.startswith(\"https://gfycat.com\"):\n em = (\n _(\"Here is {name} gif ... \")\n + emoji\n + _(\n \"\\n{url}\\n\\n**Reddit Author:** {author}\\n**Title:** {title}\\n\"\n \"Requested by {req} • From r/{r}\\n**Link of Reddit post** {post}\"\n )\n ).format(\n url=url,\n name=name,\n author=author,\n title=title,\n req=bold(ctx.author.display_name),\n r=bold(subr),\n post=post,\n )\n return em"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds and sends an embed message with merge request information. | async def process_merge_request_hook(data: models.MergeRequestHook):
project = data.project
merge = data.merge_request
user = data.user
description = ""
action = "Issue updated"
colour = discord.Colour.light_grey()
if merge.action == "open":
action = "Merge request opened"
description = merge.description
colour = discord.Colour.dark_green()
elif merge.action == "close":
action = "Merge request closed"
colour = discord.Colour.dark_grey()
embed = discord.Embed(title=f"[{project.namespace}/{project.name}] {action}: !{merge.iid} {merge.title}",
url=merge.url, description=description, colour=colour)
embed.set_author(name=user.username, icon_url=user.avatar_url)
embed.set_footer(text=f"{merge.source_branch} → {merge.target_branch}")
await send_message(None, embed=embed) | [
"def build_commit_msg(author, reviewers, source_branch, target_branch,\n commit_message, mp_web_link):\n return \"Merge {} into {} [a={}] [r={}]\\n\\n{}\\n\\nMP: {}\".format(\n source_branch, target_branch, author,\n reviewers, commit_message, mp_web_link)",
"async def build_embed(cls, entity, client, event, message_jump_url, detailed):\n if detailed:\n await cls.update_entity_details(entity, client)\n embed = cls.build_embed_detailed(entity)\n extra_fields_set = copy_extra_fields(embed, event)\n if extra_fields_set:\n return embed\n \n else:\n embed = create_base_embed(entity, None if entity.url is None else 'Click to open')\n \n add_embed_footer(embed, entity)\n add_embed_author(embed, event, cls.name, message_jump_url)\n return embed",
"async def on_submit(self, interaction: Interaction) -> None:\n await interaction.response.defer()\n await self.embed_view.update_embed(self.message, content=self.content.value)",
"def build_embed(self, source_object) -> discord.Embed:\n url, location, first_line = self.get_github_url(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n help_cmd = self.bot.get_command(\"help\")\n description = help_cmd.help\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, ModuleType):\n title = f\"Extension: {source_object.__name__}.py\"\n description = discord.Embed.Empty\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = discord.Embed(title=title, description=description, colour=0x87CEEB)\n embed.add_field(name=\"Source Code\", value=f\"[Here's the Github link!]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed",
"async def update_embed(self) -> None:\n\n self.embed = build_actions_embed(LoggingActions.all_enabled_actions(self.bits))\n await self.message.edit(embed=self.embed)",
"def send_pull_request_merged(self, model: PullRequestNotification):\n self._send_notification(\n title=\"Pull request merged\",\n message=f\"{model.name} has been merged in {model.org}/{model.repository}\",\n audio=Sound.FAILURE.value,\n )",
"def slackbuild_pubsub(data, context):\n global config\n global slack\n\n print(data)\n print(context)\n\n build, template = BuildStatus.toMessage(data, config)\n\n msg = slack.render_message(build, template)\n\n return slack.post_message(msg)",
"def build_message(self):\n if isinstance(self.body, dict):\n self.body_str = self.build_body()\n else:\n if self.body is not None:\n self.body_str = self.body\n\n self.update_content_length(self.body_str)\n\n message = \"\"\n\n # Add method, path and params\n message = \"%s %s%s %s\\r\\n\" % (\n self.method,\n self.path,\n self.build_parameters(),\n self.http_version,\n )\n\n # Add headers\n for k, v in self.headers.items():\n message = message + \"%s: %s\\r\\n\" % (k, v)\n message = message + \"\\r\\n\"\n\n # Add body\n message = message + self.body_str.decode(\n \"utf-8\", \"replace\"\n )\n\n self.message_info.setRequest(message)",
"async def build_options_embed():\n embed = helper.embed_builder(\n bot.user.name,\n \"Hello, my friend! I am Valron. Here are the supported \"\n + \"classes and HP modifiers for your reference\",\n )\n embed.add_field(\n name=\"List of supported classes\",\n value=helper.alias_builder(constants.DND_ALIASES()),\n inline=False,\n )\n embed.add_field(\n name=\"List of supported HP modifiers\",\n value=helper.alias_builder(constants.HP_MOD_ALIASES()),\n inline=False,\n )\n embed.set_footer(\n text=\"/help - main help command\\n\" + \"/links - to view some helpful links\"\n )\n\n return embed",
"async def embedbuilder(self, ctx) -> None:\n em = EmbedBuilderMenu()\n await em.start(ctx)",
"async def sample_embed(self, ctx: ct.ctxType):\n embed = discord.Embed(\n title=\"Sample Embed\",\n url=\"https://youtu.be/dQw4w9WgXcQ\",\n description=\"This is a sample embed.\",\n colour=discord.Colour.dark_blue(),\n )\n embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar.url)\n\n embed.set_thumbnail(url=ctx.author.avatar.url)\n\n embed.add_field(\n name=\"Field1\", value=\"Value under Field1, inline=False\", inline=False\n )\n embed.add_field(\n name=\"Field2\", value=\"Value under Field2, inline=True\", inline=True\n )\n embed.add_field(\n name=\"Field3\", value=\"Value under Field3, inline=True\", inline=True\n )\n\n embed.set_footer(\n text=f\"Requested by {ctx.author.name}\", icon_url=ctx.author.avatar.url\n )\n\n await ctx.reply(embed=embed)",
"async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begin:\", value=event.begin.format(\"HH:mm:ss ZZZ\") + \" (\" + event.begin.humanize() + \")\", inline=False)\n e.add_field(name=\"Duration: \", value=str(event.duration), inline=False)\n #e.add_field(name=\"Link\", value=info.get(\"url\"), inline=False)\n e.set_image(url=info.get(\"thumbnail\") or e.Empty)\n return e",
"async def build_links_embed():\n embed = helper.embed_builder(\n bot.user.name,\n \"Hello, my friend! I am Valron. My wife has compiled a \"\n + \"list of helpful links for you.\",\n )\n embed.add_field(\n name=\"Invite me to your server with this link\",\n value=f\"[Click me!]({constants.DISCORD_INVITE_LINK})\",\n inline=False,\n )\n embed.add_field(\n name=\"Find out what's new with me from the support discord server\",\n value=f\"[Click me!]({constants.SUPPORT_SERVER_LINK})\",\n inline=False,\n )\n embed.add_field(\n name=\"See how I was made\",\n value=f\"[Click me!]({constants.GITHUB_LINK})\",\n inline=False,\n )\n embed.add_field(\n name=\"Want to support me and my wife?\",\n value=f\"Click any of these: [PayPal]({constants.PAYPAL_LINK}) \"\n + f\"| [Ko-Fi]({constants.KOFI_LINK}) | [GCash]({constants.GCASH_QR_CODE})\",\n inline=False,\n )\n embed.set_footer(\n text=\"/help - main help command\\n\"\n + \"/options - to see the list of supported classes and HP modifiers\"\n )\n\n return embed",
"def generate_embed(self):\n doc_id = self.documentcloud_id\n doc_sidebar = str(self.sidebar).lower()\n style_embed = '<link rel=\"stylesheet\" type=\"text/css\" href=\"{css}\"></link>'.format(css=EMBED_CSS)\n iframe_embed = '<div><iframe class=\"docpubEmbed\" src=\"https://www.documentcloud.org/documents/{id}.html?sidebar={sidebar}\"></iframe></div>'.format(\n id=doc_id,\n sidebar=doc_sidebar\n ) # style=\"border:none;width:100%;height:500px\" # desktop height 930px, mobile height 500px\n self.embed_code = style_embed + iframe_embed",
"async def build_help_embed():\n\n embed = helper.embed_builder(\n bot.user.name,\n \"Hello, my friend! I am Valron. Below is a guide on \"\n + \"how I can help you compute for your AL D&D 5e character's hit points.\",\n )\n embed.add_field(\n name=\"Command\",\n value=\"`/hp <con_modifier> <classA#/classB#/etc> [hp_mod1/hp_mod2/etc]`\",\n inline=False,\n )\n embed.add_field(name=\"Single Class Example\", value=\"`/hp 3 fighter1`\", inline=False)\n embed.add_field(\n name=\"Multiclass Example\", value=\"`/hp 3 fighter1/barb2/paladin1`\", inline=False\n )\n embed.add_field(\n name=\"Example with HP modifiers\",\n value=\"`/hp 3 fighter1/barb2/paladin1 tough/hilldwarf`\",\n inline=False,\n )\n embed.set_footer(\n text=\"/options - to see the list of supported classes and HP modifiers\\n\"\n + \"/links - to view some helpful links\"\n )\n\n return embed",
"def _build_message(self, op, params, transmission_id=None):\n if transmission_id is None:\n self.transmission_id = self.transmission_id % 16383 + 1\n else:\n self.transmission_id = transmission_id\n message = {'message': {'transmission_id': [self.transmission_id], 'op': op, 'parameters': dict(params)}}\n return json.dumps(message)",
"async def create_embed(self, author, author_message):\n embed = Embed(colour=author.color)\n\n if author_message.clean_content:\n embed.add_field(name=author.display_name, value=f\"{author_message.clean_content}\\n[[jump]]({author_message.jump_url})\")\n\n if author_message.attachments:\n for att in author_message.attachments:\n for ext in self.IMG_EXT:\n if ext in att.filename:\n break\n else:\n for ext in self.VIDEO_EXT:\n if ext in att.filename:\n embed.add_field(name=\"\\u200b\", value=f\"🎞️ {att.filename}\", inline=False)\n break\n else:\n embed.add_field(name=\"\\u200b\", value=f\"📁 {att.filename}\", inline=False)\n break\n break\n embed.set_image(url=f\"{att.url}\")\n\n if author_message.embeds and not author_message.attachments:\n for embed in author_message.embeds:\n embed.clear_fields()\n embed.set_image(url=\"\")\n embed.add_field(name=author.display_name, value=author_message.clean_content)\n\n embed.set_thumbnail(url=author.avatar_url_as(size=32, format='png'))\n\n if not author_message.clean_content:\n embed.add_field(name=\"\\u200b\", value=f\"[[jump]]({author_message.jump_url})\", inline=False)\n\n return embed",
"def build_message(self):\n self.update_content_length(self.body)\n\n message = \"\"\n message = \"%s %s %s\\r\\n\" % (\n self.http_version,\n self.response_code,\n self.response_value,\n )\n\n # Add headers\n for k, v in self.headers.items():\n message = message + \"%s: %s\\r\\n\" % (k, v)\n message = message + \"\\r\\n\"\n\n # Add body\n message = message + self.body.decode(\n \"utf-8\", \"replace\"\n )\n\n self.message_info.setResponse(message)",
"async def new(self, ctx):\n author_id = ctx.message.author.id\n record = await Mongo.get_record('embed', 'embed_owner', author_id)\n if record is None:\n upg = {\n \"embed_owner\": author_id,\n \"author\": \"\",\n \"description\": \"\",\n \"field_1\": \"\",\n \"name_1\": \"\",\n \"field_2\": \"\",\n \"name_2\": \"\",\n \"field_3\": \"\",\n \"name_3\": \"\",\n \"field_4\": \"\",\n \"name_4\": \"\",\n \"set_image\": \"\",\n \"footer\": \"\"\n }\n await Mongo.record_insert('embed', upg)\n await ctx.send(\"Создание вашего личного embed успешно.:white_check_mark:\")\n else:\n await ctx.send(\"У вас уже есть свои личный embed\\nВы можете использовать 'em clear', для его очистки.\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that represents the window in which Character Mods can be applied. | def chars_window():
path_dir = r'Sor_Mods_Storage\chars'
char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)
# Loading Images to screen
chars = tk.Toplevel()
mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))
imgRandom_label = tk.Label(chars, image=mainTitleImg)
title = tk.Label(chars, text="Characters Mods")
comboBox_chars = ttk.Combobox(chars, values=list(char_mods_dict.keys()))
def apply_char_mod():
char_selected = comboBox_chars.get()
result_window = tk.Toplevel()
value = ''
if char_selected == '':
value = f'{value} Please Select an Mod to Apply!'
else:
sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='chars')
value = f'Character Mod {char_selected} applied!'
result_label = tk.Label(result_window, text=value)
result_label.pack()
btn_apply = tk.Button(chars, text='Apply', command=apply_char_mod)
title.grid(row=0, column=0)
comboBox_chars.grid(row=1, column=0)
imgRandom_label.grid(row=1, column=1)
btn_apply.grid(row=2, column=0) | [
"def extend_window(self):\r\n # create a regular expression pattern to find the beginning/end of the sentence\r\n left_pattern = re.compile(r'[A-ZА-Яa-zа-я] [.!?]')\r\n right_pattern = re.compile(r'[.!?] [A-ZА-Яa-zа-я]')\r\n leftcontext = self.string[:self.start+1][::-1]\r\n rightcontext = self.string[self.end:]\r\n # scan through string looking for a match to the pattern\r\n left = re.search(left_pattern, leftcontext)\r\n right = re.search(right_pattern, rightcontext)\r\n # determine the boundaries of context window\r\n if left is None:\r\n self.start = 0\r\n else:\r\n self.start = self.start - left.start()\r\n if right == None:\r\n self.end = len(self.string)\r\n else:\r\n self.end = self.end + right.start() + 1",
"def window_function(self):\n return self._wndfnc, self._wndfnc_norm",
"def inl_window(**kwargs):\n return _fixed_window(**kwargs)",
"def get_window(self): # real signature unknown; restored from __doc__\n pass",
"def brain_window(volumenode):\n displaynode = volumenode.GetDisplayNode()\n displaynode.AutoWindowLevelOff()\n displaynode.SetWindowLevel(100, 50)",
"def box_window(**kwargs):\n return _fixed_window(**kwargs)",
"def _get_window_width(self):",
"def render_to_window(self):",
"def make_windows(text, window_size=3):\n windows = []\n for msg_idx, line in enumerate(text): # count through each line of text\n print()\n print('Message index:', msg_idx)\n print('Message:', line)\n for idx in range(len(line) - window_size + 1): # slide a window along the line until it reaches the end\n window = line[idx: idx + window_size] # get the words that fall into that window as a list\n print('Window idx:', idx, '\\twindow:', window)\n windows.append(window) # add that list of tokens in a window to the list of windows\n print(windows)\n return windows",
"def modifiers_coding_map_creator(self):\n self.mapCreatorWindow = map_creator.ModifiersMapCreatorWindow()\n self.mapCreatorWindow.move(self.pos())\n self.mapCreatorWindow.resize(CODING_MAP_RESIZE_W, CODING_MAP_RESIZE_H)\n self.mapCreatorWindow.show()",
"def window(windowX, windowY, occurrency):\n\tdef window0(dx, dy, dz):\n\n\t\tresizeXY(windowX,windowY,occurrency, dx, dz)\n\n\t\tmodel = []\n\t\tfor xIndex in range(len(windowX)):\n\t\t\tyQuotes = []\n\t\t\txSum = sum(windowX[:xIndex])\n\t\t\tfor yIndex in range(len(windowY)):\n\t\t\t\tif(occurrency[xIndex][yIndex] == False):\n\t\t\t\t\tyQuotes.append(-windowY[yIndex])\n\t\t\t\telse:\n\t\t\t\t\tyQuotes.append(windowY[yIndex])\n\t\t\tmodel.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)]))\n\n\t\tresult = STRUCT(model)\n\t\tresult = MAP([S2,S3,S1])(PROD([result, Q(dy)]))\n\t\twindowFrame = STRUCT([result])\n\t\twindowFrame = TEXTURE([\"iron.jpg\"])(windowFrame)\n\n\t\tglass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95])\n\t\tglass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass)\n\t\tglass = TEXTURE([\"glass2.jpg\"])(glass) \n\n\t\twindow = STRUCT([windowFrame, glass])\n\t\twindow = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window)\n\t\t\n\t\treturn window\n\n\treturn window0",
"def _feature_window_function(window_type, window_size, blackman_coeff):\n if window_type == HANNING:\n return torch.hann_window(window_size, periodic=False)\n elif window_type == HAMMING:\n return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46)\n elif window_type == POVEY:\n # like hanning but goes to zero at edges\n return torch.hann_window(window_size, periodic=False).pow(0.85)\n elif window_type == RECTANGULAR:\n return torch.ones(window_size, dtype=torch.get_default_dtype())\n elif window_type == BLACKMAN:\n a = 2 * math.pi / (window_size - 1)\n window_function = torch.arange(window_size, dtype=torch.get_default_dtype())\n # can't use torch.blackman_window as they use different coefficients\n return blackman_coeff - 0.5 * torch.cos(a * window_function) + \\\n (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)\n else:\n raise Exception('Invalid window type ' + window_type)",
"def renderWindowEditor(*args, autoResize: bool=True, blendMode: Union[int, bool]=0, caption:\n Union[AnyStr, bool]=\"\", changeCommand: Union[List[AnyStr, AnyStr, AnyStr,\n AnyStr], bool]=None, clear: Union[List[int, int, float, float, float],\n bool]=None, cmEnabled: bool=True, colorManage: bool=True, compDisplay:\n Union[int, bool]=0, compImageFile: Union[AnyStr, bool]=\"\", control:\n bool=True, currentCamera: Union[AnyStr, bool]=\"\", currentCameraRig:\n Union[AnyStr, bool]=\"\", defineTemplate: AnyStr=\"\", displayImage:\n Union[int, bool]=0, displayImageViewCount: Union[int, bool]=0,\n displayStyle: Union[AnyStr, bool]=\"\", docTag: Union[AnyStr, bool]=\"\",\n doubleBuffer: bool=True, drawAxis: bool=True, editorName: bool=True,\n exists: bool=True, exposure: Union[float, bool]=0.0, filter:\n Union[AnyStr, bool]=\"\", forceMainConnection: Union[AnyStr, bool]=\"\",\n frameImage: bool=True, frameRegion: bool=True, gamma: Union[float,\n bool]=0.0, highlightConnection: Union[AnyStr, bool]=\"\", loadImage:\n AnyStr=\"\", lockMainConnection: bool=True, mainListConnection:\n Union[AnyStr, bool]=\"\", marquee: Union[List[float, float, float, float],\n bool]=None, nbImages: bool=True, nextViewImage: bool=True,\n outputColorManage: bool=True, panel: Union[AnyStr, bool]=\"\", parent:\n Union[AnyStr, bool]=\"\", pcaption: Union[AnyStr, bool]=\"\", realSize:\n bool=True, refresh: bool=True, removeAllImages: bool=True, removeImage:\n bool=True, resetRegion: bool=True, resetViewImage: bool=True, saveImage:\n bool=True, scaleBlue: Union[float, bool]=0.0, scaleGreen: Union[float,\n bool]=0.0, scaleRed: Union[float, bool]=0.0, selectionConnection:\n Union[AnyStr, bool]=\"\", showRegion: Union[List[int, int], bool]=None,\n singleBuffer: bool=True, snapshot: Union[List[AnyStr, int, int],\n bool]=None, snapshotMode: bool=True, stateString: bool=True, stereo:\n Union[int, bool]=0, stereoImageOrientation: Union[List[AnyStr, AnyStr],\n bool]=None, stereoMode: Union[AnyStr, bool]=\"\", toggle: bool=True,\n unParent: bool=True, unlockMainConnection: bool=True,\n updateMainConnection: bool=True, useTemplate: AnyStr=\"\", viewImageCount:\n Union[int, bool]=0, viewTransformName: Union[AnyStr, bool]=\"\",\n writeImage: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass",
"def newwin(nlines, ncols, begin_y=0, begin_x=0): # real signature unknown; restored from __doc__\n pass",
"def Lwindows(text,L):\n windows=list()\n for i in range (0,len(text)-L+1):\n windows.append(kmer(text,i,L))\n return windows",
"def enemy_window():\n path_dir = r'Sor_Mods_Storage\\enemies'\n enemy_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n enemies = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n\n imgRandom_label = tk.Label(enemies, image=mainTitleImg)\n title = tk.Label(enemies, text=\"Enemies Mods\")\n\n comboBox_enemies = ttk.Combobox(enemies, values=list(enemy_mods_dict.keys()))\n\n def apply_enemy_mod():\n char_selected = comboBox_enemies.get()\n result_window = tk.Toplevel()\n\n value = ''\n if char_selected == '':\n value = f'{value} Please Select an Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='enemies')\n value = f'Enemy Mod {char_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(enemies, text='Apply', command=apply_enemy_mod)\n\n title.grid(row=0, column=0)\n comboBox_enemies.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)",
"def newwin(self, *args):\n background = app.get_colpair('background')\n win = curses.newwin(*args)\n win.bkgd(' ', background)\n win.scrollok(True)\n win.keypad(1)\n # Thanks to Angus Gibson for pointing out this missing line which was causing\n # problems that needed dirty hackery to fix. :)\n return win",
"def createNewWin(curses):\n\n #On dimensionne notre fenetre\n x = config.rawConfig.getint(\"GLOBAL\", \"x\")\n y = config.rawConfig.getint(\"GLOBAL\", \"y\")\n\n #On créer la fenêtre\n win = curses.newwin(x,y,0,0)\n\n #On affecte les différents paramètres a notre fenêtre\n win.keypad(1)\n win.nodelay(1)\n win.border(0)\n\n return win",
"def _create_context_window(self, context_window_size, clear_text):\n if self.offset_answer_start == 0 and self.offset_answer_end == 0:\n return '', 0, 0\n else:\n len_ans = self.offset_answer_end - self.offset_answer_start\n context_window_size = max(context_window_size, len_ans + 1)\n len_text = len(clear_text)\n midpoint = int(len_ans / 2) + self.offset_answer_start\n half_window = int(context_window_size / 2)\n window_start_ch = midpoint - half_window\n window_end_ch = midpoint + half_window\n overhang_start = max(0, -window_start_ch)\n overhang_end = max(0, window_end_ch - len_text)\n window_start_ch -= overhang_end\n window_start_ch = max(0, window_start_ch)\n window_end_ch += overhang_start\n window_end_ch = min(len_text, window_end_ch)\n window_str = clear_text[window_start_ch:window_end_ch]\n return window_str, window_start_ch, window_end_ch"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that represents the window in which Enemy Mods can be applied. | def enemy_window():
path_dir = r'Sor_Mods_Storage\enemies'
enemy_mods_dict = sor_module.list_char_mods(path_dir=path_dir)
# Loading Images to screen
enemies = tk.Toplevel()
mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))
imgRandom_label = tk.Label(enemies, image=mainTitleImg)
title = tk.Label(enemies, text="Enemies Mods")
comboBox_enemies = ttk.Combobox(enemies, values=list(enemy_mods_dict.keys()))
def apply_enemy_mod():
char_selected = comboBox_enemies.get()
result_window = tk.Toplevel()
value = ''
if char_selected == '':
value = f'{value} Please Select an Mod to Apply!'
else:
sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='enemies')
value = f'Enemy Mod {char_selected} applied!'
result_label = tk.Label(result_window, text=value)
result_label.pack()
btn_apply = tk.Button(enemies, text='Apply', command=apply_enemy_mod)
title.grid(row=0, column=0)
comboBox_enemies.grid(row=1, column=0)
imgRandom_label.grid(row=1, column=1)
btn_apply.grid(row=2, column=0) | [
"def render_to_window(self):",
"def get_window(self): # real signature unknown; restored from __doc__\n pass",
"def brain_window(volumenode):\n displaynode = volumenode.GetDisplayNode()\n displaynode.AutoWindowLevelOff()\n displaynode.SetWindowLevel(100, 50)",
"def get_classroom_challenge_window(self):\n self.gui.active_window.hide()\n\n self.associated_window = classroom_management_window.ClassroomManagementWindow(self.gui)\n self.gui.active_window = self.associated_window\n\n self.gui.active_window.show()",
"def windowEventHandler(self, window = None): \n if window.isClosed():\n sys.exit()\n \n wp = window.getProperties() \n windowWidth = wp.getXSize() \n windowHeight = wp.getYSize() \n for i in self.playerCameras: #added to allow for changing player number\n #since window size has changed we need to update the aspect ratio and FOV\n i.node().getLens().setAspectRatio(windowWidth/(windowHeight/2)) \n i.node().getLens().setFov(60)",
"def stage_window():\n path_dir = r'Sor_Mods_Storage\\stages'\n stage_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n stages = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(stages, image=mainTitleImg)\n title = tk.Label(stages, text=\"Stage Mods\")\n\n comboBox_chars = ttk.Combobox(stages, values=list(stage_mods_dict.keys()))\n\n def apply_stage_mod():\n stage_selected = comboBox_chars.get()\n result_window = tk.Toplevel()\n\n value = ''\n if stage_selected == '':\n value = f'{value} Please Select an Stage Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=stage_selected, type='stages')\n value = f'Enemy Mod {stage_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(stages, text='Apply', command=apply_stage_mod)\n\n title.grid(row=0, column=0)\n comboBox_chars.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)",
"def box_window(**kwargs):\n return _fixed_window(**kwargs)",
"def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)",
"def chars_window():\n path_dir = r'Sor_Mods_Storage\\chars'\n char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n chars = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(chars, image=mainTitleImg)\n title = tk.Label(chars, text=\"Characters Mods\")\n\n comboBox_chars = ttk.Combobox(chars, values=list(char_mods_dict.keys()))\n\n def apply_char_mod():\n char_selected = comboBox_chars.get()\n result_window = tk.Toplevel()\n\n value = ''\n if char_selected == '':\n value = f'{value} Please Select an Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='chars')\n value = f'Character Mod {char_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(chars, text='Apply', command=apply_char_mod)\n\n title.grid(row=0, column=0)\n comboBox_chars.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)",
"def show(self, window):\r\n\r\n return",
"def get_window(self):\n return self.__window",
"def about_window():\n about.About_Window()",
"def inl_window(**kwargs):\n return _fixed_window(**kwargs)",
"def StartUpWindow():\n\n global Window\n Window = pygame.display.set_mode(WindowDimensions)\n Window.fill(WHITE)\n pygame.display.set_caption(\"Conway's game of life!\")",
"def _create_example_window():\n return Window({\"warning\": False, \"state\": \"close\"})",
"def init_window(self):\n size = self.WINDOW_LENGTH, self.WINDOW_HEIGHT\n screen = pygame.display.set_mode(size)\n caption = \"Connect 5\"\n pygame.display.set_caption(caption)\n\n # Initialize Mouse Area\n self.clear_mouse_area(screen)\n\n # Initialize Board Area\n self.draw_board(screen)\n\n # Initialize Message Box Area with preliminary text.\n if self.control == self.player_one: # player_one is human\n self.refresh_msg_box(screen, self.MSG_YOUR_TURN)\n else: # player_two is not.\n self.refresh_msg_box(screen, self.MSG_AI_TURN)\n return screen",
"def SetupWindow(self):\n\n #VM ADDED\n pti.infant_tobii_controller.leapCalibrateBaby()\n\n # Important to do this first because it gets the windows in the correct order for focus etc.\n\n #VM CHANGED SELF.WIN FULLSCR TO TRUE\n if self.stimPres:\n # Stimulus presentation window\n self.win = visual.Window((self.screenWidth, self.screenHeight), fullscr=True, screen=self.screenIndex, allowGUI=False,\n units='pix', color=self.screenColor)\n self.dummyThing = visual.Circle(self.win, size=1, color=self.win.color) # This is for fixing a display glitch in PsychoPy3 involving multiple windows of different sizes.\n # Coder window\n self.win2 = visual.Window((400, 400), fullscr=False, screen=self.expScreenIndex, allowGUI=True, units='pix', waitBlanking=False,\n rgb=[-1, -1, -1])\n\n #VM ADDED\n self.controller = pti.infant_tobii_controller(self.win)\n self.controller.start_recording('data/toerase2.tsv', embed_event = True)\n\n if self.stimPres:\n tempText = visual.TextStim(self.win2, text=\"Loading Stimuli\", pos=[0, 0], color='white', bold=True, height=40)\n tempText.draw()\n self.win2.flip()\n # Step 1: Load and present \"startImage\"\n if self.startImage is not '':\n self.dummyThing.draw()\n tempStim = self.stimList[self.startImage]\n tempStimObj = visual.ImageStim(self.win, tempStim['stimLoc'], size=[self.movieWidth, self.movieHeight])\n tempStimObj.draw()\n self.win.flip() # This should now be on the screen until the first attngetter\n self.stimDict = {x: [] for x in self.stimNames.keys()} # This holds all the loaded movies.\n self.counters = {x: 0 for x in self.stimNames.keys()} # list of counters, one per index of the dict, so it knows which movie to play\n tempCtr = {x: 0 for x in self.stimNames.keys()}\n for i in self.actualTrialOrder:\n # Adjust for hab sub-trials. Looks for a very specific set of traits, which could occur, but...shouldn't.\n if '.' in i:\n tempI = i\n while '.' in tempI:\n tempI = tempI[tempI.index('.')+1:]\n i = tempI\n x = tempCtr[i] # Changed so hab trials get the same treatment as everything else.\n if x < len(self.stimNames[i]):\n tempStim = self.stimList[self.stimNames[i][x]]\n if tempStim['stimType'] == 'Movie':\n tempStimObj = visual.MovieStim3(self.win, tempStim['stimLoc'],\n size=[self.movieWidth, self.movieHeight], flipHoriz=False,\n flipVert=False, loop=False)\n elif tempStim['stimType'] == 'Image':\n tempStimObj = visual.ImageStim(self.win, tempStim['stimLoc'],\n size=[self.movieWidth, self.movieHeight])\n elif tempStim['stimType'] == 'Audio':\n tempStimObj = sound.Sound(tempStim['stimLoc'])\n else: # The eternal problem of audio/image pair. 
Just creates an object that's a dict of audio and image.\n audioObj = sound.Sound(tempStim['audioLoc'])\n imageObj = visual.ImageStim(self.win, tempStim['imageLoc'],\n size=[self.movieWidth, self.movieHeight])\n tempStimObj = {'Audio': audioObj, 'Image': imageObj}\n tempAdd = {'stimType':tempStim['stimType'], 'stim':tempStimObj}\n self.stimDict[i].append(tempAdd)\n tempCtr[i] += 1\n\n if len(list(self.playAttnGetter.keys())) > 0:\n for i in list(self.attnGetterList.keys()):\n if self.attnGetterList[i]['stimType'] == 'Audio':\n self.attnGetterList[i]['file'] = sound.Sound(self.attnGetterList[i]['stimLoc'])\n else:\n self.attnGetterList[i]['file'] = visual.MovieStim3(self.win, self.attnGetterList[i]['stimLoc'],\n size=[self.movieWidth, self.movieHeight],\n flipHoriz=False, flipVert=False, loop=False)\n if self.endImage is not '': # Load image for end of experiment, if needed.\n tempStim = self.stimList[self.endImage]\n self.endImageObject = visual.ImageStim(self.win, tempStim['stimLoc'], size=[self.movieWidth, self.movieHeight])\n else:\n self.endImageObject = None\n self.keyboard = self.key.KeyStateHandler()\n self.win2.winHandle.push_handlers(self.keyboard)\n if self.stimPres:\n self.win.winHandle.push_handlers(self.keyboard)\n self.baseSize = 40 # Base size of all attention-getters, in pixels\n self.attnGetterSquare = visual.Rect(self.win, height=self.baseSize, width=self.baseSize, pos=[self.testOffset + 0, 0], fillColor='black')\n self.attnGetterCross = visual.ShapeStim(self.win, vertices='cross', size=self.baseSize, pos=[self.testOffset + 0, 0], fillColor='black')\n\n numVertices = 10\n starRad = self.baseSize #This creates a large but static rotating star. It does not loom.\n starVerts = []\n for x in range(0,numVertices):\n if x % 2 == 1:\n tempRad = starRad*.55 # How much to draw in between the \"points\"\n else:\n tempRad = starRad\n tempVert = [tempRad*sin((2*pi)/numVertices * x), tempRad*cos((2*pi)/numVertices * x)]\n starVerts.append(tempVert)\n\n self.attnGetterStar = visual.ShapeStim(self.win, vertices=starVerts, pos=[self.testOffset + 0, 0], fillColor='black')\n\n self.statusSquareA = visual.Rect(self.win2, height=80, width=80,\n pos=[self.statusOffset - 60, self.statusOffsetY + 0],\n fillColor='black') # These two appear on the status screen window.\n self.statusSquareB = visual.Rect(self.win2, height=80, width=80,\n pos=[self.statusOffset + 60, self.statusOffsetY + 0], fillColor='black')\n self.statusTextA = visual.TextStim(self.win2, text=\"\", pos=[self.statusOffset - 60, self.statusOffsetY + 0],\n color='white', bold=True, height=30)\n self.statusTextB = visual.TextStim(self.win2, text=\"\", pos=[self.statusOffset + 60, self.statusOffsetY + 0],\n color='white', bold=True, height=30)\n self.trialText = visual.TextStim(self.win2, text=\"Trial no: \", pos=[-100, 150], color='white')\n self.readyText = visual.TextStim(self.win2, text=\"Trial not active\", pos=[-25, 100], color='white')\n \n #VM ADDED\n print('experiment starting...')\n \n self.doExperiment() # Get this show on the road!",
"def ctx():\n window = create_window()\n arcade.set_window(window)\n prepare_window(window)\n return window.ctx",
"def initiate_lobby(root):\n room_window = LobbyUI(root)\n root.geometry('%dx%d' % (LOBBY_WIDTH, LOBBY_HEIGHT))\n LOG.debug('Kick up the 4d3d3d3.')\n return room_window"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that represents the window in which Stage Mods can be applied. | def stage_window():
path_dir = r'Sor_Mods_Storage\stages'
stage_mods_dict = sor_module.list_char_mods(path_dir=path_dir)
# Loading Images to screen
stages = tk.Toplevel()
mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))
imgRandom_label = tk.Label(stages, image=mainTitleImg)
title = tk.Label(stages, text="Stage Mods")
comboBox_chars = ttk.Combobox(stages, values=list(stage_mods_dict.keys()))
def apply_stage_mod():
stage_selected = comboBox_chars.get()
result_window = tk.Toplevel()
value = ''
if stage_selected == '':
value = f'{value} Please Select an Stage Mod to Apply!'
else:
sor_module.apply_mod(mod_dir=path_dir, mod=stage_selected, type='stages')
value = f'Enemy Mod {stage_selected} applied!'
result_label = tk.Label(result_window, text=value)
result_label.pack()
btn_apply = tk.Button(stages, text='Apply', command=apply_stage_mod)
title.grid(row=0, column=0)
comboBox_chars.grid(row=1, column=0)
imgRandom_label.grid(row=1, column=1)
btn_apply.grid(row=2, column=0) | [
"def get_window(self): # real signature unknown; restored from __doc__\n pass",
"def box_window(**kwargs):\n return _fixed_window(**kwargs)",
"def brain_window(volumenode):\n displaynode = volumenode.GetDisplayNode()\n displaynode.AutoWindowLevelOff()\n displaynode.SetWindowLevel(100, 50)",
"def render_to_window(self):",
"def get_window(self):\n return self.__window",
"def inl_window(**kwargs):\n return _fixed_window(**kwargs)",
"def windowEventHandler(self, window = None): \n if window.isClosed():\n sys.exit()\n \n wp = window.getProperties() \n windowWidth = wp.getXSize() \n windowHeight = wp.getYSize() \n for i in self.playerCameras: #added to allow for changing player number\n #since window size has changed we need to update the aspect ratio and FOV\n i.node().getLens().setAspectRatio(windowWidth/(windowHeight/2)) \n i.node().getLens().setFov(60)",
"def show(self, window):\r\n\r\n return",
"def window(master=None, modal=False):\r\n global counter\r\n counter +=1; win = Win(master, title=counter, op=2)\r\n # use specific 'text' and 'bg' according to window type (master, slave, modal)\r\n if master is None: text, bg = 'MASTER', '#0F0'\r\n elif modal: text, bg = 'MODAL of window %s' % master.title, '#F00'\r\n else: text, bg = 'SLAVE of window %s' % master.title, '#FF0'\r\n Label(win, text=text, bg=bg)\r\n Button(win, text='Create master window', command=lambda: window())\r\n Button(win, text='Create slave window', command=lambda: window(win))\r\n Button(win, text='Create modal window', command=lambda: window(win, True))\r\n Button(win, text='Kill me and all my slaves', command=win.exit)\r\n # modal window (= blocking window) requires win.wait() instead of win.loop()\r\n win.wait() if modal else win.loop()",
"def _get_window_width(self):",
"def window_handle():\r\n return _base._rsf.window_handle()",
"def window(self):\n return self.attribute('VW')",
"def window_function(self):\n return self._wndfnc, self._wndfnc_norm",
"def workflow_slide_detection_window():\n\n return SelectSlide",
"def show_window_fields(self):\n self.analyze()\n items = []\n for ba in analyzer.window_actions:\n items.append(\n \"{0} : {1}\".format(\n ba.full_name(), layout_fields(ba)))\n\n return rstgen.ul(items)",
"def get_parent_window(self): # real signature unknown; restored from __doc__\n pass",
"def update_window(self):\n self.hwin = self._get_pid(self.name)\n self.window = self._get_window_dim() if self.hwin else self.monitors[0]",
"def WidgetShow(self, Widget, Parent=None):\r\n if self.WindowName == 'field':\r\n print(\"Field Window was created and dropped\")\r\n Result = Canvas(Parent,height=270,width=290,bg='white')\r\n fieldWindow = FieldWindow(Parent)\r\n Result.create_window(150,140,window=fieldWindow,anchor='center')\r\n elif self.WindowName == 'startField':\r\n print(\"Start Field Window was created and dropped\")\r\n Result = Canvas(Parent,height=130,width=300,bg='white')\r\n startFieldWindow = StartFieldWindow(Parent)\r\n Result.create_window(155,70,window=startFieldWindow,anchor='center')\r\n elif self.WindowName == 'end':\r\n print(\"End Window was created and dropped\")\r\n Result = Canvas(Parent, height=35,width=80,bg='white')\r\n endWindow = EndFieldWindow(Parent)\r\n Result.create_window(45,25,window=endWindow,anchor='center')\r\n elif self.WindowName == 'packetInfo':\r\n print(\"Packet Information Window was created and dropped\")\r\n Result = Canvas(Parent,height=110,width=350,bg='white')\r\n packetInfoWindow = PacketInformationWindow(Parent)\r\n Result.create_window(180,60,window=packetInfoWindow,anchor='center')\r\n elif self.WindowName == 'referenceList':\r\n print(\"Reference List Window was created and dropped\")\r\n Result = Canvas(Parent,height=130,width=350,bg='white')\r\n referenceListWindow = ReferenceListWindow(Parent)\r\n Result.create_window(180,70,window=referenceListWindow,anchor='center')\r\n else: \r\n Result = Widget \r\n #set it's attributes appropriately \r\n #Result['text'] = '%s '%self.Name\r\n Result['relief'] = RAISED\r\n Result['borderwidth'] = 2\r\n return Result",
"def chars_window():\n path_dir = r'Sor_Mods_Storage\\chars'\n char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n chars = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(chars, image=mainTitleImg)\n title = tk.Label(chars, text=\"Characters Mods\")\n\n comboBox_chars = ttk.Combobox(chars, values=list(char_mods_dict.keys()))\n\n def apply_char_mod():\n char_selected = comboBox_chars.get()\n result_window = tk.Toplevel()\n\n value = ''\n if char_selected == '':\n value = f'{value} Please Select an Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='chars')\n value = f'Character Mod {char_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(chars, text='Apply', command=apply_char_mod)\n\n title.grid(row=0, column=0)\n comboBox_chars.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
delete the specified intent from your account. | def delete_intent(intent_name):
try:
client.get_intent(
name=intent_name,
versionOrAlias='$LATEST'
)
answer=raw_input("Do you want to delete %s from your account(Y/y for YES, other NO):" %(intent_name))
if answer in ['Y', 'y']:
client.delete_intent(
name=intent_name
)
print "You chose to delete the intent %s, deleted..." %(intent_name)
else:
print "You chose not to delete the inten t%s, exiting..." %(intent_name)
except:
print "There is no intent called %s, exiting..." %(intent_name)
return | [
"def delete_intent(project_id, intent_id):\n\n intents_client = dialogflow.IntentsClient()\n intent_path = intents_client.intent_path(project_id, intent_id)\n intents_client.delete_intent(request={\"name\": intent_path})",
"def delete_intent(project_id, intent_id):\n\n intents_client = dialogflow.IntentsClient()\n\n intent_path = intents_client.intent_path(project_id, intent_id)\n\n intents_client.delete_intent(request={\"name\": intent_path})",
"def delete_account():\n pass",
"def delete_account(self, account):\n \n pass",
"def remove_intent(self):\n intent = self.data.intents[sp.sender]\n Utils.execute_token_transfer(self.data.token_address, sp.self_address, sp.sender, self.data.token_id, intent.token_amount)\n del self.data.intents[sp.sender]",
"def delete(self, args):\n try:\n db = get_db('intents')\n intents = db.delete_intent(args['intent'])\n resp = jsonify(intents=intents)\n resp.status_code = 200\n return resp\n except DatabaseError as error:\n resp = jsonify(error=error)\n resp.status_code = 500\n return resp\n except DatabaseInputError as error:\n resp = jsonify(error=error)\n resp.status_code = 400\n return resp",
"def deleteIntent(self, nameIntent):\n if nameIntent in self.dicIntents:\n del self.dicIntents[nameIntent]\n self.ouput.exec('Se ha eliminado \"'+nameIntent+'\" de la lista de Intenciones del ChatBot \"'+self.name+'\".')\n\n if not(self.currentIntent is None) and nameIntent == self.currentIntent.tag:\n self.currentIntent = None # reinicio del atributo\n self.ouput.exec('\"'+nameIntent+'\" ha dejado se ser la intención actual.')\n else:\n self.ouput.exec('No existe \"'+nameIntent+'\" en la lista de Intenciones del ChatBot \"'+self.name+'\".')",
"def delete_account(details):\n details.delete_account()",
"def delete_activity():\n pass",
"def delete_account(AccountId=None):\n pass",
"def delete_account(self):\n Credential.account_list.remove(self)",
"def delete_account(self):\n Account.account_details.remove(self)",
"def delete_account(self):\n Credentials.accounts.remove(self)",
"def delete_account(self):\n signals.before_gameaccount_deleted.send(gameaccount=self.gameaccount)\n db.delete(self.gameaccount)",
"def del_account(self):\n\n self.delete_sync_token()",
"def delete(self, account: str, memo: Optional[str], memo_type: Optional[str]):\n pass",
"def account_delete(request):\n fields = [\"email\", \"token\"]\n\n # serializes the quert string to a dict (neeto)\n args = request.args\n\n query_validation = validate_query_params(args, fields)\n # check that body validation succeeded\n if query_validation[1] != 200:\n return query_validation\n\n auth = azure_refresh_token(args[\"token\"])\n if not auth[0]:\n return http400(\"Not Authenticated\")\n\n account_db = Database(\"accounts\")\n storage = Storage(\"biit_profiles\")\n try:\n account_db.delete(args[\"email\"])\n storage.delete(args[\"email\"] + \".jpg\")\n return http200(\"Account deleted\")\n except:\n return http400(\"Error in account deletion\")",
"def delete(self, sender, dest, arg):\n insult = Insult.get(int(arg)) \n db.session.delete(insult)\n db.session.commit()\n helpers.msgOK(self.client, dest, parse_name(sender)[0])",
"def delete(self, data):\n url = self.base_url + '/v2/account/delete/'\n return self._call_vendasta(url, data)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
demo function to get the intent's latest configuration | def get_intent_configuration(intent_name, version ="$LATEST"):
response=client.get_intent(
name=intent_name,
version=version
)
return response | [
"def get_config (self):\n # ret = core.adaptation.controller_adapter.domains.components[\n # 'OPENSTACK'].rest_adapter.get_config()\n # print \"Return: \", ret\n # print core.adaptation.controller_adapter.domains.components[\n # 'OPENSTACK'].rest_adapter._response.text\n pass",
"def get_config():\n return CONFIG",
"def get_config(self):\n return self.full_config",
"def config(self) -> \"AutomationConfig\":",
"def get_config(self,config):\n return self.parser.get(\"main\", config)",
"def get_extra_config_sample():\n pass",
"def get_config(self):\n iq_cmd = self.Iq()\n iq_cmd['type'] = 'get'\n action_cmd = ET.Element('oa')\n action_cmd.attrib['xmlns'] = 'connect.logitech.com'\n action_cmd.attrib['mime'] = (\n 'vnd.logitech.harmony/vnd.logitech.harmony.engine?config')\n iq_cmd.set_payload(action_cmd)\n result = iq_cmd.send(block=True)\n payload = result.get_payload()\n assert len(payload) == 1\n action_cmd = payload[0]\n assert action_cmd.attrib['errorcode'] == '200'\n device_list = action_cmd.text\n return json.loads(device_list)",
"def _est_config(self):\n return self._est_method.config",
"def get_config():\n app = NbConvertApp()\n app.load_config_file()\n return app.config",
"def app_config(self):\n return self._app_conf[\"aiscalator\"]",
"def config(self):\n return local.config()",
"def _get_config_dict():\r\n return CONFIGS",
"def get_app_config(self) -> Dict[str, Any]:\n return self.app_config",
"def get_config():\n try:\n if os.getenv(\"APP_ENV\") == \"DEV\":\n return {\n \"EXAMPLE_ID\": \"EXAMPLE\",\n }\n # else return PRODUCTION\n return {\n \"EXAMPLE_ID\": os.getenv(\"EXAMPLE_ID\"),\n }\n except Exception as exception:\n raise Exception(\n \"Unable to get CONFIG:\", exception)",
"def Get():\n return Configuration()",
"def getMain(self):\n return self._configService.getMain()",
"def get(self):\n lc = self._client.describe_launch_configurations(LaunchConfigurationNames=[self._name])\n if len(lc[\"LaunchConfigurations\"]) == 0:\n return None\n else:\n config = lc[\"LaunchConfigurations\"][0]\n config[\"UserData\"] = base64.b64decode(config[\"UserData\"])\n return config",
"def _get_config(redis_api, log):\n config = redis_api.hget_all(\"ingest:visibility_receiver\")\n return config",
"def config(self):\r\n return skillConfig"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
a helper function to print the intent information in format | def format_print_jobs(intent):
print "\nintentName: %s" %(intent['name'])
for k,v in intent.iteritems():
if k <> 'name':
print "\t" + str(k) + ": " + str(v) | [
"def print_help_classify():",
"def cmd_info(self):\n self.cmd_author()\n self.cmd_date()\n log = self.get_log() or ''\n print(len(log))\n print(log)",
"def print_animal_info(self):",
"def print_meta(self, info):\n pass",
"def print_standout(info):\n sys.stdout.write(\"Info: %s\" % info)\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()",
"def printinfo(song):\n print(\"%9s %s %-5s %-30s %-30s %s\" % (song[\"SNG_ID\"], song[\"MD5_ORIGIN\"], song[\"MEDIA_VERSION\"], song[\"ART_NAME\"], song[\"ALB_TITLE\"], song[\"SNG_TITLE\"]))",
"def printInfoDoc():\n global _modinfo\n print _modinfo\n help(\"ProcUtils\")",
"def print_ticket_info_short(ticket):\n print(\"Subject: \", ticket.subject)\n print(\"Description\", ticket.description)\n print(\"Priority: \", ticket.priority)\n print('-' * 40)\n print('\\n')",
"def _info(message):\n print('_info >' + message)",
"def describe(ctx):",
"def process_info(process):\n\thelp(process)",
"def print_mon_info(self):\n\t\tprint('')\n\t\tprint(f'Full Name: {self.full_name}')\n\t\tprint(f'Sleepy Name: {self.sleepy_name}')\n\t\tprint(f'Awakened Name: {self.awaken_name}')\n\t\tprint(f'Element: {self.element}')\n\t\tprint(f'Grade: {self.grade}')\n\t\tprint(f'Grade Num: {self.grade_num}')\n\t\tprint(f'Mon Type: {self.mon_type}')\n\t\tprint(f'Get From: {self.get_from}')\n\t\tprint(f'When Awakened: {self.when_awakened}')\n\t\tprint(f'Good For: {str(self.good_for)}')\n\t\tprint(f'Skill Up Info: {self.skillup_info}')\n\t\tprint(f'Total Score: {self.score_total}')\n\t\tprint(f'User Score: {self.score_user}')\n\t\tprint(f'Ratings: {self.ratings}')\n\t\tprint(f'Links: {self.links}')",
"def sequenceinfo(dictchain):\n k = 0\n for s in dictchain.sequenceList:\n print \"Step::%i [\\033[1;34m%s\\033[1;m]%4s [\\033[1;32m%s\\033[1;m] \" % (k,s['input'],\"-->\",s['output'])\n #alg = \" ::--- Algorithm(s): \\n\"\n alg = \"\"\n if type(s['algorithm']) == list:\n for j in s['algorithm']:\n alg += \" |--- %s (Package: %s)\\n\" % (j.getFullName(),j.getDlls())\n alg = alg[:-1]\n else:\n j = s['algorithm']\n alg = \" |--- %s (Package: %s) \" % (j.getFullName(),j.getDlls())\n print alg\n k+=1",
"def info(str):\n if verbose:\n\n print(str)",
"def print_params():\n\n help_out = convert_phil_to_text(master_phil, att_level=1)\n txt_out = convert_phil_to_text(master_phil)\n\n return help_out, txt_out",
"def Explanation(self) -> str:",
"def help_description():\n # for ain\n print(\"--------TABLE FOR AIN(AIN4=GND)-------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | AINP | AINN |\")\n for i in range(8):\n print(\"| {} | {} | AIN{} | AIN{} |\".format(str(i), bin(i)[2:].zfill(3), DICT_AIN[i][0],\n DICT_AIN[i][1]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR FSR------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | FSR |\")\n for i in range(6):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_FSR[i]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR RATE------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | RATE |\")\n for i in range(8):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_RATE[i].rjust(7, ' ')))\n print(\"--------------------------------------\")",
"def info(object, spacing=10, collapse=1):\n methodList = [meth for meth in dir(object) if callable(getattr(object,meth))]\n processFunc = collapse and (lambda s: \" \".join(s.split())) or (lambda s: s)\n print \"\\n\".join([\"%s %s\" % (meth.ljust(spacing),\n processFunc(str(getattr(object, meth).__doc__)))\n for meth in methodList])",
"def info(object, spacing=10, collapse=1):\n methodList = [method for method in dir(object) if callable(getattr(object, method))]\n argList = [method for method in dir(object) if not callable(getattr(object, method))]\n processFunc = collapse and (lambda s: \" \".join(s.split())) or (lambda s: s)\n print \"\\n\".join([\"%s %s\" %\n (method.ljust(spacing),\n processFunc(str(getattr(object, method).__doc__)))\n for method in methodList])\n print argList"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Crawls all requested bug data and bug IDs. Saves them in files (bugIDListP.pickle, bugIDList.csv, bugsData.txt) and/or MongoDB collections (BugIDs, BugsData), depending on which are given at initialization. | def get_all_bugs(self) -> List:
#starting point
offset = 0
#list for all bugs
resultBugList = []
#list for bug IDs
bugIDList = []
#checks if there are still results returned
notEmpty = True
#queries in 500 bug steps until the result list is empty
while notEmpty:
print("entered")
#interpretation of result as list plus formatting for eval errors
result = ast.literal_eval(self.session.get(self.bugURL + "&offset=" + str(offset)).text.
replace('true', 'True').replace('false', 'False').replace('null', 'None'))["bugs"]
#checks if the query needs to be set again with a new offset
if result:
resultBugList += result
else:
notEmpty = False
#gets the ID out of all comments
partList = [bug["id"] for bug in result]
bugIDList += partList
#sets new starting point
offset += 500
#inserts bug ids and bugs into db if given one
if self.mongoDB:
for id in bugIDList:
self.mongoDB["BugIDs"].insert_one({"ID": id})
self.mongoDB["BugsData"].insert_many(resultBugList)
#creates files for bug ids and bugs if given a folder
if self.folder:
#saves bug list as python object
with open(self.folderpath + "bugIDListP.pickle", "wb") as a:
pickle.dump(bugIDList, a)
#saves bug list as csv
with open(self.folderpath + "bugIDList.csv", "w") as b:
for id in bugIDList:
b.write(str(id) + "\n")
with open(self.folderpath + "bugsData.txt", "w") as c:
for bug in resultBugList:
c.write(str(bug) + "\n")
#returns List Object for further processing
return(bugIDList) | [
"def run(self, bugzilla_loc, first_bug=1, last_bug=52535, outdir=\"bugs\", outfile=\"bugs.json\"):\n\t\ttry:\n\t\t\tos.makedirs(outdir)\n\t\texcept:\n\t\t\tpass\n\t\tdef get_bug_path(bid):\n\t\t\treturn os.path.join(outdir, \"%s.json\" % (bid))\n\n\t\tfor bug in range(first_bug, last_bug):\n\t\t\tlog.info(\"Grab bug %d\" % bug)\n\n\t\t\tbout = get_bug_path(bug)\n\t\t\tif not os.path.exists(bout):\n\t\t\t\ttry:\n\t\t\t\t\tdlpage = \"%sshow_bug.cgi?ctype=xml&id=%s\" % (bugzilla_loc, bug)\n\t\t\t\t\tdct = xmltodict.parse(getpage(dlpage).read())\n\t\t\t\t\t#write out to separate dir\n\t\t\t\t\twith open(bout, \"w\") as fp:\n\t\t\t\t\t\tjson.dump(dct, fp)\n\t\t\t\texcept:\n\t\t\t\t\tlog.error(\"Couldn't download bug %d\" % bug)\n\t\t\t\t\ttraceback.print_exc()\n\n\t\t\t\ttime.sleep(random.random() * 0.1)\n\n\t\tbugs = dict()\n\t\tfor bug in range(first_bug, last_bug):\n\t\t\tlog.info(\"Collect bug %d\" % bug)\n\t\t\tbout = get_bug_path(bug)\n\t\t\twith open(bout, \"r\") as fp:\n\t\t\t\tbjs = json.load(fp)\n\t\t\tbugs[bug] = bjs[\"bugzilla\"][\"bug\"]\n\n\t\twith open(outfile, \"w\") as fp:\n\t\t\tjson.dump(bugs, fp, indent=4)",
"def reprocess_bugs():\n # we fetch bugs in chunks to reduce race condition chances\n pages = Paginator(Bug.objects.all(), 50)\n print 'Processing %d bugs' % pages.count\n for pnum in pages.page_range:\n for b in pages.page(pnum).object_list:\n b.save()\n sys.stdout.write('.')\n sys.stdout.flush()\n print '\\nDone.'",
"def get_all_comments(self, idList: Union[List, str]) -> None:\n\n #loads pickle list if it is one\n if type(idList) == str and \".pickle\" in idList:\n print(\"pickle load\")\n with open(idList, \"rb\") as f:\n idList = pickle.load(f)\n elif type(idList) == str:\n print(\"Error: Buglist parameter seems to be neither a List object or the name of a pickle file \"\n \"(needs to contain .pickle).\")\n\n #goes through idList\n for id in tqdm(idList):\n #performs request and replaces trouble some parts\n commentsString = self.session.get(self.commentURL.format(id)).text.\\\n replace('true', 'True').replace('false', 'False').replace('null', 'None')\n #gets only the comments\n commentsDict = ast.literal_eval(commentsString)[\"bugs\"][str(id)][\"comments\"]\n\n #enters comments into db or file if there are any comments for the id\n if commentsDict:\n if self.mongoDB:\n self.mongoDB[\"Comments\"].insert_many(commentsDict)\n if self.folder:\n with open(self.folderpath + \"Bugzilla_Comments.txt\", 'a') as f:\n f.write(str(commentsDict) + \"\\n\")",
"def populate_db(self):\n global MANAGER\n jira_ids = self._db.unique_jira_ids_from_git()\n LOG.info(\"retrieving %s jira_ids from the issue tracker\", len(jira_ids))\n counter = MANAGER.counter(total=len(jira_ids), desc='fetch from Jira', unit='issue')\n chunk_size = 50\n chunks = [jira_ids[i:i + chunk_size] for i in range(0, len(jira_ids), chunk_size)]\n\n cnt = 0\n for chunk in chunks:\n query = \"key in (\" + \",\".join([(\"'\" + jira_id + \"'\") for jira_id in chunk]) + \")\"\n results = self.client.search_issues(jql_str=query, maxResults=chunk_size,\n fields='fixVersions')\n for result in results:\n jira_id = result.key\n fix_versions = [version.name for version in result.fields.fixVersions]\n for fix_version in fix_versions:\n self._db.apply_fix_version(jira_id, fix_version)\n cnt += 1\n if cnt % 50:\n self._db.flush_commits()\n counter.update(incr=len(chunk))\n time.sleep(5)\n self._db.flush_commits()",
"def bz_search_handler():\n bugs = []\n try:\n bugs = bz.get_matching_bugs('whiteboard', '\\[autoland.*\\]')\n except (urllib2.HTTPError, urllib2.URLError), e:\n log.error(\"Error while polling bugzilla: %s\" % (e))\n return\n if not bugs:\n return\n\n for (bug_id, whiteboard) in bugs:\n tag = get_first_autoland_tag(whiteboard)\n #log.debug('Bug %s with tag %s' % (bug_id, tag))\n\n if tag == None or re.search('in-queue', tag) != None:\n # Strange that it showed up if None\n continue\n\n # get the branches\n branches = get_branch_from_tag(tag)\n log.debug('Flagged for landing on branches: %s' % (branches))\n if not branches:\n # this was probably flagged [autoland], since it was picked up\n # and doesn't have a branch attached.\n log.debug('No branches from tag %s' % (tag))\n continue\n for branch in tuple(branches):\n # clean out any invalid branch names\n # job will still land to any correct branches\n if db.BranchQuery(Branch(name=branch)) == None:\n branches.remove(branch)\n log.error('Branch %s does not exist.' % (branch))\n\n # If there are no correct or permissive branches, go to next bug\n if not branches:\n continue\n\n log.debug('Found and processing tag %s' % (tag))\n # get the explicitly listed patches, if any\n patch_group = get_patches_from_tag(tag) if not None else []\n\n # get try syntax, if any\n try_syntax = get_try_syntax_from_tag(tag)\n\n ps = PatchSet()\n # all runs will get a try_run by default for now\n ps.try_syntax = try_syntax\n ps.branch = ','.join(branches)\n ps.patches = patch_group\n ps.bug_id = bug_id\n\n # check patch reviews & permissions\n patches = get_patchset(ps.bug_id, ps.try_run,\n ps.patchList(), review_comment=False)\n if not patches:\n # do not have patches to push, kick it out of the queue\n bz.remove_whiteboard_tag(tag.replace('[', '\\[').replace(']', '\\]'),\n bug_id)\n log.error('No valid patches attached, nothing for '\n 'Autoland to do here, removing this bug from the queue.')\n continue\n ps.author = patches[0]['author']['email']\n ps.patches = ','.join(str(x['id']) for x in patches)\n\n if db.PatchSetQuery(ps) != None:\n # we already have this in the db, don't add it.\n # Remove whiteboard tag, but don't add to db and don't comment.\n log.debug('Duplicate patchset, removing whiteboard tag.')\n bz.remove_whiteboard_tag(tag.replace('[', '\\[').replace(']','\\]'),\n bug_id)\n continue\n\n # add try_run attribute here so that PatchSetQuery will match patchsets\n # in any stage of their lifecycle\n ps.try_run = 1\n\n log.info('Inserting job: %s' % (ps))\n patchset_id = db.PatchSetInsert(ps)\n log.info('Insert Patchset ID: %s' % (patchset_id))\n\n bz.replace_whiteboard_tag('\\[autoland[^\\[\\]]*\\]',\n '[autoland-in-queue]', bug_id)",
"def getIssues(db, data):\n start = datetime.utcnow() # Time this and log how long refreshing took.\n try:\n cur = getRelevantIssues(db, data)\n except pymongo.errors.PyMongoError as e:\n return {\"error\": \"Error querying the Mongo database: \" +\n e.message}\n\n count = 0\n dbd_data = {\n # TODO: make sets of these to make the lookups below faster\n \"SLA\": data.get(\"SLA\", []),\n \"FTS\": data.get(\"FTS\", []),\n \"REV\": [], # Just refresh these every time\n \"UNA\": data.get(\"UNA\", []),\n \"active\": data.get(\"active\", {}),\n \"waiting\": data.get(\"waiting\", {})\n }\n\n try:\n revIssues = getREVIssues(db)\n except pymongo.errors.PyMongoError as e:\n return {\"error\": \"Error querying the Mongo database: \" +\n e.message}\n\n updated_data = {\n \"SLA\": [],\n \"FTS\": [],\n \"REV\": revIssues,\n \"UNA\": []\n }\n for i in cur:\n count += 1\n issue = SupportIssue().fromDoc(i)\n\n # Keep track of the totals:\n # --- Active issue count ---\n if issue.isActive():\n dbd_data['active'][issue.key] = 1\n elif issue.key in dbd_data['active']:\n del dbd_data['active'][issue.key]\n # --- Waiting For Customer issue count ---\n if issue.isWFC() and not issue.doc['deleted']:\n dbd_data['waiting'][issue.key] = 1\n elif issue.key in dbd_data['waiting']:\n del dbd_data['waiting'][issue.key]\n\n # For each category, see if the issue belongs, and if not, remove it\n # from the dashboard issues if it was there.\n if isSLA(issue):\n updated_data[\"SLA\"].append(trimmedSLAIssue(issue))\n else:\n removeCompressedIssueIfPresent(issue, dbd_data[\"SLA\"])\n if isFTS(issue):\n updated_data[\"FTS\"].append(trimmedFTSIssue(issue))\n else:\n removeCompressedIssueIfPresent(issue, dbd_data[\"FTS\"])\n if isUNA(issue):\n updated_data[\"UNA\"].append(trimmedUNAIssue(issue))\n else:\n removeCompressedIssueIfPresent(issue, dbd_data[\"UNA\"])\n\n mergeAndSortIssues(dbd_data, updated_data)\n\n duration = datetime.utcnow() - start\n logger.info(\"getIssues took {0}, count: {1}\".format(duration, count))\n return dbd_data",
"def get_bug_data(self, current_date=None):\n start_time = time.time()\n bug_data = self.web_connection.get_async_data_using_asyncio_paginated(self.bug_url, self.web_constants, 5)\n end_time = time.time()\n # print(f\"Commit data using Parallel (asyncio)\\n {commit_data}\\n\\n\")\n print(f\"Time Taken to Fetch Bug Details {end_time - start_time}\")\n\t\t\n bugs_parser = BugsJsonParser()\n bug_list_df = bugs_parser.parse_json(bug_data)\n\n if current_date is None:\n current_date = datetime.today().strftime('%Y-%m-%d')\n directory = f\"{CDPConfigValues.schedule_file_path}/{self.project_name}/{current_date}\"\n CDPConfigValues.create_directory(directory)\n bug_list_df.to_csv(\n f\"{CDPConfigValues.schedule_file_path}/{self.project_name}/{current_date}/\"\n f\"{CDPConfigValues.project_issue_list_file_name}\",\n index=False)\n else:\n bug_list_df.to_csv(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\", index=False)\n\n return bug_list_df",
"def bugs(self):\n raw_bug_list = self.changelog[\"text\"]\n self._logger.debug(\"Raw bug list {0}\".format(raw_bug_list))\n return self._bug_id_fetcher(raw_bug_list)",
"def get_bugs(self, year):\n directory = self.get_bugs_path(year)\n for path in self._get_files(directory, pattern='bugs.*.json'):\n for bug in helpers.load_json(path):\n yield bug",
"def run(self):\n if self.parsed_args.fetch_cache:\n issues = self.backend.fetch_from_cache()\n else:\n issues = self.backend.fetch(from_date=self.from_date)\n\n try:\n for issue in issues:\n obj = json.dumps(issue, indent=4, sort_keys=True)\n # self.outfile.write(issue['url']+\"\\n\")\n self.outfile.write(obj)\n self.outfile.write('\\n')\n except requests.exceptions.HTTPError as e:\n raise requests.exceptions.HTTPError(str(e.response.json()))\n except IOError as e:\n raise RuntimeError(str(e))\n except Exception as e:\n if self.backend.cache:\n self.backend.cache.recover()\n raise RuntimeError(str(e))",
"def filter_bugs(bugs):\n counter = 1\n returned_bugs = []\n for bug in bugs:\n\n add_bug=True\n if args.open == True:\n if bug.status not in [\"IN_PROGRESS\", \"UNCONFIRMED\", \"CONFIRMED\"]:\n add_bug=False\n if args.verbose:\n print (\"ignoring (not open) :\",bug.id, bug.status)\n if args.closed == True:\n if bug.status not in [\"RESOLVED\", \"VERIFIED\"]:\n add_bug=False\n if args.verbose:\n print (\"ignoring (not open) :\",bug.id, bug.status)\n if args.version is not None:\n if bug.version != args.version:\n add_bug=False\n if args.verbose:\n print (\"ignoring (version) :\",bug.id, bug.version)\n if args.state is not None:\n #if float(bug.cf_uctt_build_version) > float(args.state):\n if bug.cf_uctt_build_version in args.state:\n add_bug=False\n if args.verbose:\n print (\"ignoring (state) :\",bug.id, bug.cf_uctt_build_version)\n if args.fromdate is not None:\n size_x = len(args.fromdate)\n creation_time = str(bug.creation_time)[:size_x]\n #print (creation_time, \" > \", args.fromdate)\n if creation_time < args.fromdate:\n add_bug=False\n if args.verbose:\n print (\"ignoring (fromdate) :\",bug.id, bug.creation_time)\n # remove editoral bugs, if the input arguments is not set\n if args.include_editorial is False:\n if bug.severity in [\"editorial\"]:\n add_bug=False\n\n #remove action items, if the input argument is not set\n if args.include_action is False:\n if bug.severity in [\"action\"]:\n add_bug=False\n\n if add_bug:\n returned_bugs.append(bug)\n\n return returned_bugs",
"def run(self):\n if self.parsed_args.fetch_cache:\n issues = self.backend.fetch_from_cache()\n else:\n issues = self.backend.fetch(from_date=self.from_date)\n\n try:\n for issue in issues:\n obj = json.dumps(issue, indent=4, sort_keys=True)\n self.outfile.write(obj)\n self.outfile.write('\\n')\n except IOError as e:\n raise RuntimeError(str(e))\n except Exception as e:\n if self.backend.cache:\n self.backend.cache.recover()\n raise RuntimeError(str(e))",
"def list_all_bugs(bugs):\n counter = 1\n for bug in bugs:\n bug_id = bug.id\n bug_sum = str(bug.summary)\n bug_sev = str(bug.severity)\n bug_stat = str(bug.status)\n bug_comp = str(bug.component)\n try:\n print (counter, bug_id, bug_comp, bug_sum, bug_sev, bug_stat)\n except:\n print (counter, bug_id)\n counter = counter + 1",
"def process_defect_log(self):\n with open(self.defectLog) as fp:\n content = fp.readlines()\n\n baseURL = \"https://github.com/ARM-software/tf-issues/issues/\"\n\n # Get defect id, title and URL link to populate the defect dictionary\n for i in content:\n i_strip = i.strip()\n\n titleIDRegex = \"^Found open bug with id: ([0-9]+): (.*)\"\n mIDTitle = re.match(titleIDRegex, i)\n\n if mIDTitle:\n defectID = mIDTitle.group(1)\n defectTitle = mIDTitle.group(2)\n defectURL = baseURL + mIDTitle.group(1)\n\n self.defectDict[defectID] = {}\n self.defectDict[defectID]['title'] = defectTitle.split(',')[0]\n self.defectDict[defectID]['url'] = defectURL\n self.defectDict[defectID]['state'] = defectTitle.split(',')[1]",
"def do_the_issues(user_id, repo_id):\n with tempfile.TemporaryDirectory() as tmp:\n path = os.path.join(tmp, \"{}_{}_issues.txt\".format(repo_id, user_id))\n issues_initial_url = get_initial_url_issues(user_id, repo_id)\n resp_obj = requests.get(issues_initial_url, headers=headers)\n # prase the initial request. for Issue\n all_issues = json.loads(resp_obj.text)\n with open(path, \"w\") as out_stream:\n for an_issue in all_issues:\n print(an_issue, file=out_stream)\n print(\"the len of resp is {}\".format(len(all_issues)))\n LINK_HEADER = \"Link\"\n next_url = None\n if LINK_HEADER in resp_obj.headers:\n # parse next page (if present)\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n # subsequent page\n while next_url:\n resp_obj = requests.get(next_url, headers=headers)\n all_issues = json.loads(resp_obj.text)\n with open(path, \"a\") as out_stream:\n for an_issue in all_issues:\n print(an_issue, file=out_stream)\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n print(next_url)\n else:\n next_url = None\n GsUpload.upload_blob(GS_BUCKET_NAME, path, basename(path))\n print(\"the issues path is \" + str(path))",
"def scrape_issues(self, url):\n try:\n self.driver.get(url)\n except common.exceptions.InvalidSessionIdException:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n except Exception:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n buganizer_issues = []\n\n if \"Buganizer\" not in page_title or \"componentid\" not in page_title:\n if \"MOMA Single Sign On\" in page_title:\n error_message = \"ERROR: You must log into your MOMA account \"\\\n \"first. Select the 'Use Security Code' option and generate a security code at go/sc.\\n\"\n self.logger.log(error_message)\n\n while \"Buganizer\" not in page_title:\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n time.sleep(1)\n\n return buganizer_issues\n error_message = \"ERROR: URL does not link to a Buganizer \"\\\n \"componentid, check specified URL \"\\\n \"in constants.py\\n\"\n self.logger.log(error_message)\n return buganizer_issues\n\n for tbody in soup.find_all('tbody'):\n for _tr in tbody.find_all('tr'):\n issue_link = \"https://b.corp.google.com/issues/\" + _tr.get(\n 'data-row-id')\n buganizer_issues.append(issue_link)\n return buganizer_issues",
"def harvest_all():\n db = db_connect()\n dois = None\n get_commits()\n get_citations(db, dois)\n get_mentions(since_version=None)\n list_records(dois)",
"def correlate_changeids(self, outdir=\"bugs/changeids\", infile=\"bugs.json\", outfile=\"bugs-commits.csv\"):\n\n\t\tbugs = None\n\t\twith open(infile, \"r\") as fp:\n\t\t\tbugs = json.load(fp)\n\n\t\ttry:\n\t\t\tos.makedirs(outdir)\n\t\texcept:\n\t\t\tpass\n\t\tdef get_out_path(bid):\n\t\t\treturn os.path.join(outdir, \"%s.json\" % (bid))\n\n\t\tgerrit_url = \"https://gerrit.wikimedia.org/\"\n\t\tpat = re.compile(gerrit_url + \"r/#/c/(\\\\d+)/\")\n\n\t\tdef get_gerrit_change_detail(cid):\n\t\t\turl = \"%sr/changes/%d/detail\" % (gerrit_url, cid)\n\t\t\tdetail = getpage(url).read()\n\t\t\t#)]}' at the beginning of the change\n\t\t\tCRUFT_LENGTH = 4\n\t\t\treturn json.loads(detail[4:])\n\n\t\tdef get_gerrit_change_detail_service(cid):\n\t\t\t\"\"\"Get UI change detail for the given change id\n\t\t\tThis isn't really guaranteed to keep working, but gives revision hashes.\n\t\t\t(in .result.patchSets[n].revision.id)\n\t\t\t\"\"\"\n\t\t\turl = \"%sr/gerrit_ui/rpc/ChangeDetailService\" % (gerrit_url)\n\t\t\tdata = {\n\t\t\t\t\"id\": 1,\n\t\t\t\t\"jsonrpc\": \"2.0\",\n\t\t\t\t\"method\": \"changeDetail\",\n\t\t\t\t\"params\": [{\n\t\t\t\t\t\"id\": cid\n\t\t\t\t}]\n\t\t\t}\n\t\t\tdata_encoded = bytes(json.dumps(data), \"utf-8\")\n\t\t\theaders = {\n\t\t\t\t\"Accept\": \"application/json,application/json,application/jsonrequest\",\n\t\t\t\t\"Content-Type\": \"application/json; charset=UTF-8\",\n\t\t\t\t\"Content-Length\": len(data_encoded)\n\t\t\t}\n\t\t\tdetail = getpage(url, data_encoded, headers).read()\n\t\t\tjsr = json.loads(detail.decode())\n\t\t\tif \"result\" in jsr:\n\t\t\t\treturn jsr[\"result\"]\n\t\t\telse:\n\t\t\t\treturn None\n\n\t\tcollectable = []\n\n\t\tfor (bugid, bug) in bugs.items():\n\t\t\tif \"long_desc\" in bug:\n\t\t\t\tif isinstance(bug[\"long_desc\"], dict):\n\t\t\t\t\tbug[\"long_desc\"] = [bug[\"long_desc\"]]\n\t\t\t\tfor desc in bug[\"long_desc\"]:\n\t\t\t\t\tif \"thetext\" in desc and desc[\"thetext\"] is not None:\n\t\t\t\t\t\tmatches = pat.finditer(desc[\"thetext\"])\n\t\t\t\t\t\tfor match in matches:\n\t\t\t\t\t\t\tchangeno = int(match.group(1))\n\t\t\t\t\t\t\t#Gerrit detail json like:\n\t\t\t\t\t\t\t#\thttps://gerrit.wikimedia.org/r/changes/67311/detail\n\t\t\t\t\t\t\t#where 67311 is the change id.\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\toutpath = get_out_path(bugid)\n\t\t\t\t\t\t\t\tif not os.path.exists(outpath):\n\t\t\t\t\t\t\t\t\tlog.info(\"Collect change id %s\" % bugid)\n\t\t\t\t\t\t\t\t\tcont = get_gerrit_change_detail_service(changeno)\n\t\t\t\t\t\t\t\t\tif cont is None:\n\t\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\t\twith open(outpath, \"w\") as fp:\n\t\t\t\t\t\t\t\t\t\tjson.dump(cont, fp)\n\t\t\t\t\t\t\t\tcollectable.append((bugid, outpath))\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tlog.error(\"Couldn't collect change id %s\" % bugid)\n\t\t\t\t\t\t\t\ttraceback.print_exc()\n\n\t\t#collect change ids\n\t\twith open(outfile, \"wt\") as fp:\n\t\t\twriter = csv.writer(fp)\n\t\t\twriter.writerow([\"bug\", \"revhash\"])\n\n\t\t\tfor (bugid, idpath) in collectable:\n\t\t\t\twith open(idpath, \"r\") as fp:\n\t\t\t\t\tjs = json.load(fp)\n\t\t\t\t\tfor ps in js[\"patchSets\"]:\n\t\t\t\t\t\twriter.writerow([bugid, ps[\"revision\"][\"id\"].strip()])",
"def run(self):\n for subreddit in self.subreddits:\n for post in self.get_posts(subreddit.strip()):\n if self.is_unique(post.id):\n imgur_link = self.get_imgur_link(post)\n if imgur_link:\n res = self.get_resolution(post.title)\n if res and self.check_res(res):\n self.update_download_list(imgur_link)\n self.add_post_id(post.id)\n if self.ids_change:\n self.update_post_id_file()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Crawls for all comments belonging to the bugs in the BugIDList. | def get_all_comments(self, idList: Union[List, str]) -> None:
#loads pickle list if it is one
if type(idList) == str and ".pickle" in idList:
print("pickle load")
with open(idList, "rb") as f:
idList = pickle.load(f)
elif type(idList) == str:
print("Error: Buglist parameter seems to be neither a List object nor the name of a pickle file "
"(needs to contain .pickle).")
#goes through idList
for id in tqdm(idList):
#performs request and replaces troublesome parts
commentsString = self.session.get(self.commentURL.format(id)).text.\
replace('true', 'True').replace('false', 'False').replace('null', 'None')
#gets only the comments
commentsDict = ast.literal_eval(commentsString)["bugs"][str(id)]["comments"]
#enters comments into db or file if there are any comments for the id
if commentsDict:
if self.mongoDB:
self.mongoDB["Comments"].insert_many(commentsDict)
if self.folder:
with open(self.folderpath + "Bugzilla_Comments.txt", 'a') as f:
f.write(str(commentsDict) + "\n") | [
"def get_all_comments(self):\n\n scores = []\n posts = [p[1] for p in self.posts]\n total_comments = \"\"\n\n i = j = 1\n # creates a long text block with all the comments from a single post that is either saved in a corpus file or\n # returned in terminal\n for p in posts:\n total_comments += \"Comments from post:\\n\"\n j += 1\n print \"Working on getting comments and flattening the comment tree...\"\n # to make praw.replace_more_comments more efficient, the limit and threshold is set at 10, meaning that\n # it will make only 10 additional requests, and only make requests that give back 10 additional comments.\n # This is limited because each request requires a 2 second delay by PRAW and if no limits are set, the program\n # will become very slow.\n p.replace_more_comments(limit=10, threshold=5)\n coms = praw.helpers.flatten_tree(p.comments)\n for c in coms:\n print \"\\nWorking on comment #{}...\".format(i)\n comment = self.token_and_tag(c.body)\n if comment:\n score = c.score\n total_comments += \"New comment:\\nKarma: {0}\\n{1}\\n\\n\".format(score, comment)\n scores.append(score)\n i+=1\n total_comments += \"\\n\"+\"+\"*30 +\"\\n\"\n\n # saves all the comments from a post in one corpus file on a post by post basis\n if self.save and self.function_call != \"cp\":\n self.save_submissions(total_comments, karma=p.score)\n\n\n if self.function_call != \"cp\" and self.loc:\n print calculate_karma(self.subreddit, self.scores, \"Karma Scores\")\n\n return total_comments",
"def _MakeGetCommentsRequest(self, bug_id, project):\n # TODO (prasadv): By default the max number of comments retrieved in\n # one request is 100. Since bisect-fyi jobs may have more then 100\n # comments for now we set this maxResults count as 10000.\n # Remove this max count once we find a way to clear old comments\n # on FYI issues.\n request = self._service.issues().comments().list(\n projectId=project, issueId=bug_id, maxResults=10000)\n return self._ExecuteRequest(request)",
"def getAllComments():",
"def get_all_bugs(self) -> List:\n #starting point\n offset = 0\n #list for all bugs\n resultBugList = []\n #list for bug IDs\n bugIDList = []\n #checks if there are still results returned\n notEmpty = True\n\n #queries in 500 bug steps until the result list is empty\n while notEmpty:\n print(\"entered\")\n #interpretation of result as list plus formatting for eval errors\n result = ast.literal_eval(self.session.get(self.bugURL + \"&offset=\" + str(offset)).text.\n replace('true', 'True').replace('false', 'False').replace('null', 'None'))[\"bugs\"]\n #checks if the query needs to be set again with a new offset\n if result:\n resultBugList += result\n else:\n notEmpty = False\n\n #gets the ID out of all comments\n partList = [bug[\"id\"] for bug in result]\n bugIDList += partList\n #sets new starting point\n offset += 500\n\n #inserts bug ids and bugs into db if given one\n if self.mongoDB:\n for id in bugIDList:\n self.mongoDB[\"BugIDs\"].insert_one({\"ID\": id})\n self.mongoDB[\"BugsData\"].insert_many(resultBugList)\n\n #creates files for bug ids and bugs if given a folder\n if self.folder:\n #saves bug list as python object\n with open(self.folderpath + \"bugIDListP.pickle\", \"wb\") as a:\n pickle.dump(bugIDList, a)\n #saves bug list as csv\n with open(self.folderpath + \"bugIDList.csv\", \"w\") as b:\n for id in bugIDList:\n b.write(str(id) + \"\\n\")\n with open(self.folderpath + \"bugsData.txt\", \"w\") as c:\n for bug in resultBugList:\n c.write(str(bug) + \"\\n\")\n\n #returns List Object for further processing\n return(bugIDList)",
"def post_process_comments(self):\r\n comment_url = self.github_url + '/issues/comments'\r\n self._post_process_comments(comment_url)",
"async def scrape_comments(self):\n\n subreddit_origin = await self.reddit.subreddit(self.subreddit)\n\n comment_count = 0\n async for comment in subreddit_origin.comments(limit=self.limit):\n if self.memory.contains(comment.id):\n continue\n\n self.memory.add(comment.id)\n\n # Parse Comment\n comment = self.parse_comment(comment)\n\n # Save in Pub/Sub\n if self.enable_publish:\n self.publish(comment)\n\n comment_count += 1\n\n return comment_count",
"def get_comments_with_tickers(self, comments_list, submission_data):\n # filter each comment for ticker mentions\n ticker_comments = list()\n for i, comment in enumerate(comments_list):\n tickers = self.filter_valid_tickers(comment)\n # if tickers mentioned in text, get comment data\n if len(tickers) > 0:\n comment_data = self.get_comment_data(comment)\n\n # if comment data available, add tickers to dict and add to result list\n if comment_data is not None:\n comment_data[\"tickers\"] = tickers\n ticker_comments.append(comment_data)\n\n # flush data to db when extracted 100 ticker comments, reset result list\n if len(ticker_comments) > 99:\n self.insert_new_data_to_db(ticker_comments, submission_data)\n ticker_comments = list()\n\n # print number of comments that been reviewed\n if i+1 % 100 == 0:\n print(f\"Numbers of comments from submission assessed: {i}\")\n\n # write any remaining ticker comments\n if len(ticker_comments) > 0:\n self.insert_new_data_to_db(ticker_comments, submission_data)",
"def lookup_comment_list(self):\n if self.thread_id is None:\n return None, None\n\n # Just pulling a single issue here so pagination shouldn't be problem\n my_req = self.raw_pull(self.thread_id)\n if my_req.status_code != 200:\n raise GitHubAngry('Bad status code %s because %s' % (\n my_req.status_code, my_req.reason))\n issue_json = my_req.json()\n comments_url = issue_json['comments_url'] + self.url_extras\n kwargs = {} if not self.user else {'auth': (self.user, self.token)}\n comments_json = []\n while comments_url:\n logging.debug('Pulling comments URL: %s', comments_url)\n c_req = requests.get(comments_url, **kwargs)\n my_json = c_req.json()\n assert isinstance(my_json, list)\n comments_json.extend(my_json)\n comments_url = None\n if 'link' in c_req.headers: # need to handle pagination.\n logging.debug('Paginating in lookup_comment_list')\n link = c_req.headers['link'].split(',')\n for thing in link:\n potential_url, part = thing.split('; ')\n if part == 'rel=\"next\"':\n comments_url = potential_url.lstrip(' <').rstrip('> ')\n\n return issue_json, comments_json",
"def _get_comments(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n return filter(lambda x: len(x) == 40, os.listdir(self.paths['comments']))",
"def get_comments(yt_id):\n\n client = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,developerKey=DEVELOPER_KEY)\n\n video_comments = client.commentThreads().list(\n videoId = yt_id,\n part=\"snippet,replies\").execute()\n\n comment_items = video_comments['items']\n\n class MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.strict = False\n self.convert_charrefs= True\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\n def strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n comments = []\n for sub_block in comment_items:\n comments.append(strip_tags(sub_block['snippet']['topLevelComment']['snippet']['textDisplay']))\n\n comments_all = ' '.join(comments)\n\n print(\"YouTube comments scanned\")\n return comments_all",
"def GetIssueComments(self, bug_id, project='chromium'):\n if not bug_id or bug_id < 0:\n return None\n response = self._MakeGetCommentsRequest(bug_id, project=project)\n if not response:\n return None\n return [{\n 'id': r['id'],\n 'author': r['author'].get('name'),\n 'content': r['content'],\n 'published': r['published'],\n 'updates': r['updates']\n } for r in response.get('items')]",
"def handle_comments():\n comments = db.CommentGetNext(limit=5) # Get up to 5 comments\n for comment in comments:\n # Note that notify_bug makes multiple retries\n success = bz.notify_bug(comment.comment, comment.bug)\n if success:\n # Posted. Get rid of it.\n db.CommentDelete(comment)\n elif comment.attempts == 5:\n # 5 attempts have been made, drop this comment as it is\n # probably not going anywhere.\n try:\n with open('failed_comments.log', 'a') as fc_log:\n fc_log.write('%s\\n\\t%s'\n % (comment.bug, comment.comment))\n except IOError, err:\n log.error('Unable to append to failed comments file.')\n log.error(\"Could not post comment to bug %s. Dropping comment: %s\"\n % (comment.bug, comment.comment))\n db.CommentDelete(comment.id)\n else:\n comment.attempts += 1\n db.CommentUpdate(comment)",
"def request_issue_comments_regexp(cfg, issue_key, comment_re):\n # pylint: disable=too-many-nested-blocks\n\n comments = []\n comments_url = cjm.request.make_cj_url(cfg, \"issue\", issue_key, \"comment\")\n\n start_at = 0\n max_results = 50\n\n while True:\n response = cjm.request.make_cj_request(\n cfg, comments_url,\n params={\"startAt\": start_at, \"maxResults\": max_results})\n response_json = response.json()\n\n for comment in response_json[\"comments\"]:\n for content_l1 in comment[\"body\"][\"content\"]:\n if content_l1[\"type\"] == JIRA_COMMENT_CONTENT_TYPE_PARAGRAPH:\n for content_l2 in content_l1[\"content\"]:\n if content_l2[\"type\"] == JIRA_COMMENT_CONTENT_TYPE_TEXT:\n m = comment_re.match(content_l2[\"text\"])\n if m is not None:\n comments.append(m)\n\n start_at += max_results\n\n if start_at >= response_json[\"total\"]:\n break\n\n return comments",
"def get_comments(storyid):\n return ...",
"def load_comments(self, storyID):\n \n sub = self.r.get_submission(storyID)\n story = base.Story(sub.id, sub.title, sub.permalink, [], sub.score)\n for com in sub.comments:\n reply_list = self.load_replies(com)\n comment = base.Comment(com.id, com.author.name, com.body, reply_list)\n story.comment_list.append(comment)\n return story",
"def main(u, o):\n click.echo(f\"Web crawling on {u} started successfully...\")\n\n comment_regex = re.compile('<!--(.*?-->)')\n\n with requests.Session() as session:\n resp = session.get(u)\n soup = BeautifulSoup(resp.text, 'lxml')\n #TODO: search for hidden attributes, may be useful\n comments = soup.find_all(text=comment_regex)\n print(comments)",
"def test_issue_get_comments(self):\n pass",
"def clean_comments(self):\r\n while True:\r\n logger.info('Starting comment cleaning')\r\n for comment in self.reddit.user.me().comments.new():\r\n logger.info(f'Considering comment {comment.submission}/{comment} with score {comment.score}')\r\n if comment.score < MIN_COMMENT_SCORE:\r\n logger.info(f'Score too low! Deleting comment')\r\n comment.delete()\r\n time.sleep(CLEAN_COMMENT_INTERVAL)",
"def handle_comments(self):\r\n comments = Comment.objects.all()\r\n for c in comments:\r\n new = ThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n user = c.user,\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = not c.is_removed\r\n )\r\n new.save()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Crawls for all comments belonging to the bugs in the BugIDList utilizing parallelization. | def get_all_comments_mp(self, list: Union[List, str], workers: int = 10) -> None:
# loads pickle list if it is one
if type(list) == str and ".pickle" in list:
print("wat")
with open(list, "rb") as f:
list = pickle.load(f)
elif type(list) == str:
print("Error: Buglist parameter seems to be neither a List object nor the name of a pickle file "
"(needs to contain .pickle).")
#gets workers and splits list into chunks fitting the worker amount
pool = Pool(workers)
list = np.array(list)
lists = np.array_split(list, workers)
#each worker crawls for comments
for sub_list in lists:
print(sub_list)
pool.apply_async(self.get_all_comments, (sub_list,))
pool.close()
pool.join() | [
"def get_all_comments(self):\n\n scores = []\n posts = [p[1] for p in self.posts]\n total_comments = \"\"\n\n i = j = 1\n # creates a long text block with all the comments from a single post that is either saved in a corpus file or\n # returned in terminal\n for p in posts:\n total_comments += \"Comments from post:\\n\"\n j += 1\n print \"Working on getting comments and flattening the comment tree...\"\n # to make praw.replace_more_comments more efficient, the limit and threshold is set at 10, meaning that\n # it will make only 10 additional requests, and only make requests that give back 10 additional comments.\n # This is limited because each request requires a 2 second delay by PRAW and if no limits are set, the program\n # will become very slow.\n p.replace_more_comments(limit=10, threshold=5)\n coms = praw.helpers.flatten_tree(p.comments)\n for c in coms:\n print \"\\nWorking on comment #{}...\".format(i)\n comment = self.token_and_tag(c.body)\n if comment:\n score = c.score\n total_comments += \"New comment:\\nKarma: {0}\\n{1}\\n\\n\".format(score, comment)\n scores.append(score)\n i+=1\n total_comments += \"\\n\"+\"+\"*30 +\"\\n\"\n\n # saves all the comments from a post in one corpus file on a post by post basis\n if self.save and self.function_call != \"cp\":\n self.save_submissions(total_comments, karma=p.score)\n\n\n if self.function_call != \"cp\" and self.loc:\n print calculate_karma(self.subreddit, self.scores, \"Karma Scores\")\n\n return total_comments",
"def get_all_comments(self, idList: Union[List, str]) -> None:\n\n #loads pickle list if it is one\n if type(idList) == str and \".pickle\" in idList:\n print(\"pickle load\")\n with open(idList, \"rb\") as f:\n idList = pickle.load(f)\n elif type(idList) == str:\n print(\"Error: Buglist parameter seems to be neither a List object or the name of a pickle file \"\n \"(needs to contain .pickle).\")\n\n #goes through idList\n for id in tqdm(idList):\n #performs request and replaces trouble some parts\n commentsString = self.session.get(self.commentURL.format(id)).text.\\\n replace('true', 'True').replace('false', 'False').replace('null', 'None')\n #gets only the comments\n commentsDict = ast.literal_eval(commentsString)[\"bugs\"][str(id)][\"comments\"]\n\n #enters comments into db or file if there are any comments for the id\n if commentsDict:\n if self.mongoDB:\n self.mongoDB[\"Comments\"].insert_many(commentsDict)\n if self.folder:\n with open(self.folderpath + \"Bugzilla_Comments.txt\", 'a') as f:\n f.write(str(commentsDict) + \"\\n\")",
"def get_all_bugs(self) -> List:\n #starting point\n offset = 0\n #list for all bugs\n resultBugList = []\n #list for bug IDs\n bugIDList = []\n #checks if there are still results returned\n notEmpty = True\n\n #queries in 500 bug steps until the result list is empty\n while notEmpty:\n print(\"entered\")\n #interpretation of result as list plus formatting for eval errors\n result = ast.literal_eval(self.session.get(self.bugURL + \"&offset=\" + str(offset)).text.\n replace('true', 'True').replace('false', 'False').replace('null', 'None'))[\"bugs\"]\n #checks if the query needs to be set again with a new offset\n if result:\n resultBugList += result\n else:\n notEmpty = False\n\n #gets the ID out of all comments\n partList = [bug[\"id\"] for bug in result]\n bugIDList += partList\n #sets new starting point\n offset += 500\n\n #inserts bug ids and bugs into db if given one\n if self.mongoDB:\n for id in bugIDList:\n self.mongoDB[\"BugIDs\"].insert_one({\"ID\": id})\n self.mongoDB[\"BugsData\"].insert_many(resultBugList)\n\n #creates files for bug ids and bugs if given a folder\n if self.folder:\n #saves bug list as python object\n with open(self.folderpath + \"bugIDListP.pickle\", \"wb\") as a:\n pickle.dump(bugIDList, a)\n #saves bug list as csv\n with open(self.folderpath + \"bugIDList.csv\", \"w\") as b:\n for id in bugIDList:\n b.write(str(id) + \"\\n\")\n with open(self.folderpath + \"bugsData.txt\", \"w\") as c:\n for bug in resultBugList:\n c.write(str(bug) + \"\\n\")\n\n #returns List Object for further processing\n return(bugIDList)",
"async def scrape_comments(self):\n\n subreddit_origin = await self.reddit.subreddit(self.subreddit)\n\n comment_count = 0\n async for comment in subreddit_origin.comments(limit=self.limit):\n if self.memory.contains(comment.id):\n continue\n\n self.memory.add(comment.id)\n\n # Parse Comment\n comment = self.parse_comment(comment)\n\n # Save in Pub/Sub\n if self.enable_publish:\n self.publish(comment)\n\n comment_count += 1\n\n return comment_count",
"def get_comments_with_tickers(self, comments_list, submission_data):\n # filter each comment for ticker mentions\n ticker_comments = list()\n for i, comment in enumerate(comments_list):\n tickers = self.filter_valid_tickers(comment)\n # if tickers mentioned in text, get comment data\n if len(tickers) > 0:\n comment_data = self.get_comment_data(comment)\n\n # if comment data available, add tickers to dict and add to result list\n if comment_data is not None:\n comment_data[\"tickers\"] = tickers\n ticker_comments.append(comment_data)\n\n # flush data to db when extracted 100 ticker comments, reset result list\n if len(ticker_comments) > 99:\n self.insert_new_data_to_db(ticker_comments, submission_data)\n ticker_comments = list()\n\n # print number of comments that been reviewed\n if i+1 % 100 == 0:\n print(f\"Numbers of comments from submission assessed: {i}\")\n\n # write any remaining ticker comments\n if len(ticker_comments) > 0:\n self.insert_new_data_to_db(ticker_comments, submission_data)",
"def post_process_comments(self):\r\n comment_url = self.github_url + '/issues/comments'\r\n self._post_process_comments(comment_url)",
"def lookup_comment_list(self):\n if self.thread_id is None:\n return None, None\n\n # Just pulling a single issue here so pagination shouldn't be problem\n my_req = self.raw_pull(self.thread_id)\n if my_req.status_code != 200:\n raise GitHubAngry('Bad status code %s because %s' % (\n my_req.status_code, my_req.reason))\n issue_json = my_req.json()\n comments_url = issue_json['comments_url'] + self.url_extras\n kwargs = {} if not self.user else {'auth': (self.user, self.token)}\n comments_json = []\n while comments_url:\n logging.debug('Pulling comments URL: %s', comments_url)\n c_req = requests.get(comments_url, **kwargs)\n my_json = c_req.json()\n assert isinstance(my_json, list)\n comments_json.extend(my_json)\n comments_url = None\n if 'link' in c_req.headers: # need to handle pagination.\n logging.debug('Paginating in lookup_comment_list')\n link = c_req.headers['link'].split(',')\n for thing in link:\n potential_url, part = thing.split('; ')\n if part == 'rel=\"next\"':\n comments_url = potential_url.lstrip(' <').rstrip('> ')\n\n return issue_json, comments_json",
"def _MakeGetCommentsRequest(self, bug_id, project):\n # TODO (prasadv): By default the max number of comments retrieved in\n # one request is 100. Since bisect-fyi jobs may have more then 100\n # comments for now we set this maxResults count as 10000.\n # Remove this max count once we find a way to clear old comments\n # on FYI issues.\n request = self._service.issues().comments().list(\n projectId=project, issueId=bug_id, maxResults=10000)\n return self._ExecuteRequest(request)",
"def request_issue_comments_regexp(cfg, issue_key, comment_re):\n # pylint: disable=too-many-nested-blocks\n\n comments = []\n comments_url = cjm.request.make_cj_url(cfg, \"issue\", issue_key, \"comment\")\n\n start_at = 0\n max_results = 50\n\n while True:\n response = cjm.request.make_cj_request(\n cfg, comments_url,\n params={\"startAt\": start_at, \"maxResults\": max_results})\n response_json = response.json()\n\n for comment in response_json[\"comments\"]:\n for content_l1 in comment[\"body\"][\"content\"]:\n if content_l1[\"type\"] == JIRA_COMMENT_CONTENT_TYPE_PARAGRAPH:\n for content_l2 in content_l1[\"content\"]:\n if content_l2[\"type\"] == JIRA_COMMENT_CONTENT_TYPE_TEXT:\n m = comment_re.match(content_l2[\"text\"])\n if m is not None:\n comments.append(m)\n\n start_at += max_results\n\n if start_at >= response_json[\"total\"]:\n break\n\n return comments",
"def crawl(self):\n try:\n self.crawl_pages()\n self.crawl_posts()\n self.crawl_comments()\n except Exception as exception:\n self.handle_request_limit(exception)",
"def crawl_comment_by_c3():\n products = Product.select().where(Product.crawl_comment_num ==0) # 没有爬过评论的\n for i, product in enumerate(products):\n if i<2:\n continue\n\n product_id = product.product_id\n if CrawlLog.select().where(CrawlLog.product_id==product_id, CrawlLog.success==True).count() > 10:\n print('抓取历史log大于100条跳过')\n continue\n\n url = 'http://127.0.0.1:5000/crawl_comment/{}/0'.format(product_id)\n resp = requests.get(url)\n print(resp.text)\n Product.update(crawl_comment_num = Product.crawl_comment_num + 10).where(Product.product_id == product_id)\n\n # 测试 todo\n if i == 240:\n break",
"def reprocess_bugs():\n # we fetch bugs in chunks to reduce race condition chances\n pages = Paginator(Bug.objects.all(), 50)\n print 'Processing %d bugs' % pages.count\n for pnum in pages.page_range:\n for b in pages.page(pnum).object_list:\n b.save()\n sys.stdout.write('.')\n sys.stdout.flush()\n print '\\nDone.'",
"def getAllComments():",
"def handle_comments(self):\r\n comments = Comment.objects.all()\r\n for c in comments:\r\n new = ThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n user = c.user,\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = not c.is_removed\r\n )\r\n new.save()",
"def _QueryComments():\r\n tasks = []\r\n for vp_dict in request['viewpoints']:\r\n if vp_dict.get('get_comments', False):\r\n tasks.append(gen.Task(Viewpoint.QueryComments, client, vp_dict['viewpoint_id'],\r\n excl_start_key=vp_dict.get('comment_start_key', None),\r\n limit=limit))\r\n else:\r\n tasks.append(util.GenConstant(None))\r\n\r\n comment_results = yield tasks\r\n raise gen.Return(comment_results)",
"def fetch_comments(item):\n # pylint: disable=R0912\n # pylint: disable=R0914\n cw, ch, _ = getxy()\n ch = max(ch, 10)\n ytid, title = item.ytid, item.title\n dbg(\"Fetching comments for %s\", c.c(\"y\", ytid))\n writestatus(\"Fetching comments for %s\" % c.c(\"y\", title[:55]))\n qs = {'textFormat': 'plainText',\n 'videoId': ytid,\n 'maxResults': 50,\n 'part': 'snippet'}\n\n # XXX should comment threads be expanded? this would require\n # additional requests for comments responding on top level comments\n\n jsdata = call_gdata('commentThreads', qs)\n\n coms = jsdata.get('items', [])\n coms = [x.get('snippet', {}) for x in coms]\n coms = [x.get('topLevelComment', {}) for x in coms]\n # skip blanks\n coms = [x for x in coms if len(x.get('snippet', {}).get('textDisplay', '').strip())]\n if not len(coms):\n g.message = \"No comments for %s\" % item.title[:50]\n g.content = generate_songlist_display()\n return\n\n items = []\n\n for n, com in enumerate(coms, 1):\n snippet = com.get('snippet', {})\n poster = snippet.get('authorDisplayName')\n _, shortdate = yt_datetime(snippet.get('publishedAt', ''))\n text = snippet.get('textDisplay', '')\n cid = (\"%s/%s\" % (n, len(coms)))\n out = (\"%s %-35s %s\\n\" % (cid, c.c(\"g\", poster), shortdate))\n out += c.c(\"y\", text.strip())\n items.append(out)\n\n cw = Config.CONSOLE_WIDTH.get\n\n def plain(x):\n \"\"\" Remove formatting. \"\"\"\n return x.replace(c.y, \"\").replace(c.w, \"\").replace(c.g, \"\")\n\n def linecount(x):\n \"\"\" Return number of newlines. \"\"\"\n return sum(1 for char in x if char == \"\\n\")\n\n def longlines(x):\n \"\"\" Return number of oversized lines. \"\"\"\n return sum(len(plain(line)) // cw for line in x.split(\"\\n\"))\n\n def linecounter(x):\n \"\"\" Return amount of space required. \"\"\"\n return linecount(x) + longlines(x)\n\n pagenum = 0\n pages = paginate(items, pagesize=ch, delim_fn=linecounter)\n\n while 0 <= pagenum < len(pages):\n pagecounter = \"Page %s/%s\" % (pagenum + 1, len(pages))\n page = pages[pagenum]\n pagetext = (\"\\n\\n\".join(page)).strip()\n content_length = linecount(pagetext) + longlines(pagetext)\n blanks = \"\\n\" * (-2 + ch - content_length)\n g.content = pagetext + blanks\n screen_update(fill_blank=False)\n xprint(\"%s : Use [Enter] for next, [p] for previous, [q] to return:\"\n % pagecounter, end=\"\")\n v = input()\n\n if v == \"p\":\n pagenum -= 1\n\n elif not v:\n pagenum += 1\n\n else:\n break\n\n g.content = generate_songlist_display()",
"def get_comments(yt_id):\n\n client = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,developerKey=DEVELOPER_KEY)\n\n video_comments = client.commentThreads().list(\n videoId = yt_id,\n part=\"snippet,replies\").execute()\n\n comment_items = video_comments['items']\n\n class MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.strict = False\n self.convert_charrefs= True\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\n def strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n comments = []\n for sub_block in comment_items:\n comments.append(strip_tags(sub_block['snippet']['topLevelComment']['snippet']['textDisplay']))\n\n comments_all = ' '.join(comments)\n\n print(\"YouTube comments scanned\")\n return comments_all",
"def ordered_crawling():\n queue.append(seed_url)\n visited.add(seed_url)\n while len(queue) >= 0:\n try:\n text = req_obj.get_html_text(queue[0])\n print queue[0]\n if text is None:\n raise requests.RequestException()\n add_links_to_queue(text, queue[0])\n # summary generated using summarizer1\n sum_obj.create_and_index_summary(\n req_obj.get_base_url(), text)\n\n # summary generated using summarizer2\n sum_obj2.create_and_index_summary(\n req_obj.get_base_url(), text)\n on_pg_sum.index_on_page_summary(text, queue[0])\n\n result_file.write(str(queue[0]) + \", \" + str(link_weights[queue[0]]))\n er_file.write(\"###########\" + str(link_weights) + \"\\n\\n\\n\\n\")\n update_weights(text)\n queue.sort(compare)\n result_file.write(\"\\n\")\n except requests.RequestException as trace:\n print str(trace) + '\\n'\n er_file.write(queue[0] + '\\n')\n er_file.write(str(trace) + '\\n\\n')\n del link_weights[queue[0]]\n queue.pop(0)",
"def run(self):\n\n # The url is too deep, skip the url.. Work is done!\n if self.depth_ > self.depth:\n return\n\n # Get doc id corresponds to the url. Add a new entry into doc index if there is no entry.\n doc_id = self.crawler.document_id(self.curr_url)\n\n # Check if the doc_id has been visited/processed by any of crawler_threads. Add doc_id to seen if not so.\n if self.crawler.checkDocVisitedAndUpdate(doc_id):\n return\n\n # Process the document corresponds to the url\n socket = None\n try:\n socket = urllib2.urlopen(self.curr_url, timeout=self.timeout)\n soup = BeautifulSoup(socket.read())\n self._curr_depth = self.depth_ + 1\n self._curr_doc_id = doc_id\n # Traverse the document as deep as possible and add those newly discovered urls into url queue\n self._index_document(soup)\n # Store (wordId, docId) and (word, url) into inverted_index and resolved_inverted_index respectively.\n self.crawler._add_words_to_document(self._curr_words, self._curr_doc_id)\n except:\n pass\n finally:\n if socket:\n socket.close()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
download sequencing file from SRA archive; requires a local install of SRA tools on the PATH; requires verification of filenames and paths | def download_SRA(SRA):
print("Downloading SRA archive")
output = subprocess.run(['prefetch', '-f', 'yes', SRA], stderr=subprocess.STDOUT)
print("Extracting FASTQ data")
output = subprocess.run(['fastq-dump', '--gzip', NCBI_DIR+SRA+'.sra'], stderr=subprocess.STDOUT) | [
"def get_sra(accession, temp_folder):\n\n set_up_sra_cache_folder(temp_folder)\n\n logging.info(\"Downloading {} from SRA\".format(accession))\n\n local_path = os.path.join(temp_folder, accession + \".fastq\")\n logging.info(\"Local path: {}\".format(local_path))\n\n # Download via fastq-dump\n logging.info(\"Downloading via fastq-dump\")\n run_cmds([\n \"prefetch\", accession\n ])\n run_cmds([\n \"fastq-dump\",\n \"--split-files\",\n \"--outdir\",\n temp_folder, accession\n ])\n\n # Make sure that some files were created\n msg = \"File could not be downloaded from SRA: {}\".format(accession)\n assert any([\n fp.startswith(accession) and fp.endswith(\"fastq\")\n for fp in os.listdir(temp_folder)\n ]), msg\n\n # Combine any multiple files that were found\n logging.info(\"Concatenating output files\")\n with open(local_path + \".temp\", \"wt\") as fo:\n cmd = \"cat {}/{}*fastq\".format(temp_folder, accession)\n cat = subprocess.Popen(cmd, shell=True, stdout=fo)\n cat.wait()\n\n # Remove the temp files\n for fp in os.listdir(temp_folder):\n if fp.startswith(accession) and fp.endswith(\"fastq\"):\n fp = os.path.join(temp_folder, fp)\n logging.info(\"Removing {}\".format(fp))\n os.unlink(fp)\n\n # Remove the cache file, if any\n cache_fp = \"/root/ncbi/public/sra/{}.sra\".format(accession)\n if os.path.exists(cache_fp):\n logging.info(\"Removing {}\".format(cache_fp))\n os.unlink(cache_fp)\n\n # Clean up the FASTQ headers for the downloaded file\n run_cmds([\"mv\", local_path + \".temp\", local_path])\n\n # Return the path to the file\n logging.info(\"Done fetching \" + accession)\n return local_path",
"def download_SRA(self, email, metadata_key='auto', directory='./', filetype='sra', aspera=False, keep_sra=False):\n from Bio import Entrez\n # Check download filetype\n filetype = filetype.lower()\n if filetype not in [\"sra\", \"fastq\", \"fasta\"]:\n raise Exception(\"Unknown type to downlod: %s. Use sra, fastq or fasta.\" % filetype)\n\n # Setup the query\n ftpaddres = \"ftp://ftp-trace.ncbi.nlm.nih.gov/sra/sra-instant/reads/ByExp/sra/SRX/{range_subdir}/{record_dir}/{file_dir}/{file_dir}.sra\"\n queries = []\n try:\n for sra in self.relations['SRA']:\n query = sra.split(\"=\")[-1]\n assert 'SRX' in query, \"Sample looks like it is not SRA: %s\" % query\n print(\"Query: %s\" % query)\n queries.append(query)\n except KeyError:\n raise NoSRARelationException('No relation called SRA for %s' % self.get_accession())\n\n # check if the e-mail is more or less not a total crap\n Entrez.email = email\n if not (Entrez.email is not None and '@' in email and email != '' and '.' in email):\n raise Exception('You have to provide valid e-mail')\n\n for query in queries:\n # retrieve IDs for given SRX\n searchdata = Entrez.esearch(db='sra', term=query, usehistory='y', retmode='json')\n answer = json.loads(searchdata.read())\n ids = answer[\"esearchresult\"][\"idlist\"]\n assert len(ids) == 1, \"There should be one and only one ID per SRX\"\n\n # using ID fetch the info\n number_of_trials = 10\n wait_time = 30\n for trial in range(number_of_trials):\n try:\n results = Entrez.efetch(db=\"sra\", id=ids[0], rettype=\"runinfo\", retmode=\"text\").read()\n break\n except HTTPError as httperr:\n if \"502\" in str(httperr):\n sys.stderr.write(\"Error: %s, trial %i out of %i, waiting for %i seconds.\" % (str(httperr),\n trial,\n number_of_trials,\n wait_time))\n time.sleep(wait_time)\n else:\n raise httperr\n df = DataFrame([i.split(',') for i in results.split('\\n') if i != ''][1:], columns = [i.split(',') for i in results.split('\\n') if i != ''][0])\n\n # check it first\n try:\n df['download_path']\n except KeyError as e:\n stderr.write('KeyError: ' + str(e) + '\\n')\n stderr.write(str(results) + '\\n')\n\n # make the directory\n if platform.system() == \"Windows\":\n name_regex = r'[\\s\\*\\?\\(\\),\\.\\:\\%\\|\\\"\\<\\>]'\n else:\n name_regex = r'[\\s\\*\\?\\(\\),\\.;]'\n directory_path = os.path.abspath(os.path.join(directory, \"%s_%s_%s\" % ('Supp',\n self.get_accession(),\n re.sub(name_regex, '_', self.metadata['title'][0]) # the directory name cannot contain many of the signs\n )))\n utils.mkdir_p(os.path.abspath(directory_path))\n\n for path in df['download_path']:\n sra_run = path.split(\"/\")[-1]\n print(\"Analysing %s\" % sra_run)\n url = ftpaddres.format(range_subdir=query[:6],\n record_dir=query,\n file_dir=sra_run)\n filepath = os.path.abspath(os.path.join(directory_path, \"%s.sra\" % sra_run))\n utils.download_from_url(url, filepath, aspera=aspera)\n\n if filetype in [\"fasta\", \"fastq\"]:\n if utils.which('fastq-dump') is None:\n raise NoSRAToolkitException(\"fastq-dump command not found\")\n ftype = \"\"\n if filetype == \"fasta\":\n ftype = \" --fasta \"\n cmd = \"fastq-dump --split-files --gzip %s --outdir %s %s\"\n cmd = cmd % (ftype, directory_path, filepath)\n\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n stderr.write(\"Converting to %s/%s_*.%s.gz\\n\" % (\n directory_path, sra_run, filetype))\n pout, perr = process.communicate()\n if not keep_sra:\n # Delete sra file\n os.unlink(filepath)",
"def download_sra_files(remote_location, local_location = '', max_recursion = 3, verbose = False):\n\n downloaded_files = list();\n\n def printv(*args):\n if(verbose):\n print(*args);\n sys.stdout.flush();\n\n printv(\"Reading folder: \", remote_location);\n\n req = urllib2.Request(remote_location);\n\n response = urllib2.urlopen(req);\n\n the_page = response.read();\n\n entries = the_page.split('\\r\\n');\n\n #Identify sub folders\n folders = list();\n for entry in entries:\n if(len(entry) == 0):\n continue;\n\n spl_entry = entry.split();\n if(spl_entry[0][0] == 'd'): #if directory flag\n folders.append(spl_entry[-1]);\n\n\n for folder in folders:\n dl_files = download_sra_files(remote_location + '/' + folder, local_location, max_recursion - 1, verbose);\n downloaded_files.extend(dl_files);\n\n #Identify SRA files\n files = list();\n for entry in entries:\n if(len(entry) == 0):\n continue;\n\n spl_entry = entry.split();\n if(spl_entry[0][0] == '-' and #Not a directory\n spl_entry[-1].lower().endswith('.sra')): #Has extension '.sra'\n\n files.append(spl_entry[-1]);\n\n if(len(files) > 0):\n printv(\"Identified sra files: \");\n for file_name in files:\n printv(\" \", file_name);\n\n abs_local_location = os.path.abspath(local_location);\n\n if(not os.path.isdir(abs_local_location)):\n os.makedirs(abs_local_location);\n\n for file_name in files:\n\n printv(\"Downloading \", file_name);\n\n file_str = remote_location + '/' + file_name;\n\n req = urllib2.Request(file_str);\n response = urllib2.urlopen(req);\n\n dest_file_name = abs_local_location + os.sep + file_name;\n dest_file = open(dest_file_name, 'wb');\n shutil.copyfileobj(response, dest_file)\n dest_file.close();\n downloaded_files.append(dest_file_name);\n\n return downloaded_files;",
"def s1ASFDownload(argumentList):\n\n url = argumentList[0]\n fileName = argumentList[1]\n uname = argumentList[2]\n pword = argumentList[3]\n\n session = SessionWithHeaderRedirection(uname, pword)\n #downloaded = False\n\n #while downloaded = False\n #try:\n\n print(' INFO: Downloading scene to: {}'.format(fileName))\n # submit the request using the session\n response = session.get(url, stream=True)\n\n # raise an exception in case of http errors\n response.raise_for_status()\n\n # get download size\n totalLength = int(response.headers.get('content-length', 0))\n\n # define chunksize\n chunkSize = 1024\n\n # check if file is partially downloaded\n if os.path.exists(fileName):\n firstByte = os.path.getsize(fileName)\n else:\n firstByte = 0\n\n while firstByte < totalLength:\n\n # get byte offset for already downloaded file\n header = {\"Range\": \"bytes={}-{}\".format(firstByte, totalLength)}\n response = session.get(url, headers=header, stream=True)\n\n # actual download\n with open(fileName, \"ab\") as f:\n\n if totalLength is None:\n f.write(response.content)\n else:\n pbar = tqdm.tqdm(total=totalLength, initial=firstByte, unit='B',\n unit_scale=True, desc=' INFO: Downloading ')\n for chunk in response.iter_content(chunkSize):\n if chunk:\n f.write(chunk)\n pbar.update(chunkSize)\n pbar.close()\n\n # updated fileSize\n firstByte = os.path.getsize(fileName)\n\n #if firstByte >= totalLength:\n # downloaded = True\n\n #except requests.exceptions.HTTPError as e:\n # downloaded = False\n # handle any errors here\n # print(e)",
"def get_sra(accession, temp_folder):\n\n # Set up the SRA cache folder\n set_up_sra_cache_folder(temp_folder)\n\n # Download from ENA via FTP\n url = ena_url(accession)\n\n logging.info(\"Base info for downloading from ENA: {}\".format(url))\n # There are three possible file endings\n file_endings = [\"_1.fastq.gz\", \"_2.fastq.gz\", \".fastq.gz\"]\n # Try to download each file\n for end in file_endings:\n run_cmds([\"curl\",\n \"-o\", os.path.join(temp_folder, accession + end),\n url + end], catchExcept=True)\n\n # Local paths for each of the three possible file endings\n r0_fp = \"{}/{}{}\".format(temp_folder, accession, \".fastq.gz\")\n r1_fp = \"{}/{}{}\".format(temp_folder, accession, \"_1.fastq.gz\")\n r2_fp = \"{}/{}{}\".format(temp_folder, accession, \"_2.fastq.gz\")\n\n # If the forward and reverse reads were downloaded, return that pair\n if os.path.exists(r1_fp) and os.path.exists(r2_fp):\n # Return a tuple of filepaths, and a bool indicating paired-end reads\n logging.info(\"Both forward and reverse reads were found\")\n return (r1_fp, r2_fp), True\n # If the file was downloaded with no _1/_2, return that\n elif os.path.exists(r0_fp):\n logging.info(\"Only a single set of unpaired reads were found\")\n return r0_fp, False\n # Hedging against oddly incomplete data, return either R1 or R2, if alone\n elif os.path.exists(r1_fp):\n logging.info(\"Only a single set of unpaired reads were found\")\n return r1_fp, False\n elif os.path.exists(r2_fp):\n logging.info(\"Only a single set of unpaired reads were found\")\n return r2_fp, False\n\n # If none of those URLs downloaded, fall back to trying NCBI\n logging.info(\"No data was found on ENA, falling back to SRA\")\n run_cmds([\n \"prefetch\", accession\n ])\n run_cmds([\n \"fastq-dump\",\n \"--split-files\",\n \"--outdir\",\n temp_folder, accession])\n\n # Local paths for each of the three possible file endings\n r0_fp = \"{}/{}{}\".format(temp_folder, accession, \".fastq\")\n r1_fp = \"{}/{}{}\".format(temp_folder, accession, \"_1.fastq\")\n r2_fp = \"{}/{}{}\".format(temp_folder, accession, \"_2.fastq\")\n\n # If the forward and reverse reads were downloaded, return that pair\n if os.path.exists(r1_fp) and os.path.exists(r2_fp):\n # Return a tuple of filepaths, and a bool indicating paired-end reads\n logging.info(\"Both forward and reverse reads were found\")\n return (r1_fp, r2_fp), True\n # If the file was downloaded with no _1/_2, return that\n elif os.path.exists(r0_fp):\n logging.info(\"Only a single set of unpaired reads were found\")\n return r0_fp, False\n # Hedging against oddly incomplete data, return either R1 or R2, if alone\n elif os.path.exists(r1_fp):\n logging.info(\"Only a single set of unpaired reads were found\")\n return r1_fp, False\n elif os.path.exists(r2_fp):\n logging.info(\"Only a single set of unpaired reads were found\")\n return r2_fp, False\n\n # If no files were downloaded, throw an error\n msg = \"File could not be downloaded from SRA: {}\".format(accession)\n raise Exception(msg)",
"def download_refseq_reference(reference_id, download_path):\n\n def mash_reference_id_to_ncbi_ftp_path(reference_id):\n \"\"\"\n Args:\n query_id (str): Mash reference ID (column 1 of mash dist report)\n Returns:\n list: Directory names used to locate reference genome\n on ftp://ftp.ncbi.nlm.nih.gov/genomes/all/\n For example:\n \"GCF/001/022/155\"\n \"\"\"\n prefix = reference_id.split('_')[0]\n digits = reference_id.split('_')[1].split('.')[0]\n path_list = [prefix] + [digits[i:i+3] for i in range(0, len(digits), 3)]\n\n return \"/\".join(path_list)\n\n ncbi_ftp_path = mash_reference_id_to_ncbi_ftp_path(reference_id)\n assembly = reference_id[:reference_id.find(\"_genomic.fna.gz\")]\n\n ncbi_ftp_server_base = \"ftp://ftp.ncbi.nlm.nih.gov\"\n fasta_url = \"/\".join([\n ncbi_ftp_server_base, \"genomes\", \"all\",\n ncbi_ftp_path,\n assembly,\n reference_id\n ])\n assembly_stat_url = \"/\".join([\n ncbi_ftp_server_base, \"genomes\", \"all\",\n ncbi_ftp_path,\n assembly,\n assembly + \"_assembly_stats.txt\"\n ])\n\n #fetch the files\n try:\n urllib.request.urlretrieve(fasta_url, \"/\".join([download_path, reference_id]))\n logger.info(\n \"file_downloaded\",\n timestamp=str(now()),\n url=fasta_url,\n )\n except Exception as e:\n logging.error(\n \"download_failed\",\n timestamp=str(now()),\n url=fasta_url,\n )\n try:\n urllib.request.urlretrieve(assembly_stat_url,\n \"/\".join([download_path, assembly + \"_assembly_stats.txt\"]))\n logger.info(\n \"file_downloaded\",\n timestamp=str(now()),\n url=assembly_stat_url,\n )\n except Exception as e:\n logging.error(\n \"download_failed\",\n timestamp=str(now()),\n url=assembly_stat_url,\n )",
"def download_assembly(assembly_name, assembly_dirpath):\r\n\ttries = 1\r\n\r\n\tplant_name = plant_assembly_name(assembly_name)\r\n\trefseq_path = get_download_url_from_refseq_genomes(assembly_name)\r\n\r\n\tif refseq_path is not None:\r\n\t\turl_for_assembly_file = refseq_path\r\n\telif plant_name is None:\r\n\t\turl_for_assembly_file = get_download_url_from_ucsc(assembly_name)\r\n\telse:\r\n\t\turl_for_assembly_file = get_download_url_from_ensemble_plants(plant_name, assembly_dirpath)\r\n\r\n\tfile_suffix = re.search(\"\\.((fa.*)|(2bit)|(fna.*))$\", url_for_assembly_file.split(SEP)[-1]).group()\r\n\tfile_suffix = re.sub(\"fna\", \"fa\", file_suffix)\r\n\tfa_file = assembly_dirpath + assembly_name + \".fa\"\r\n\tdownloaded_file = assembly_dirpath + assembly_name\r\n\tmax_tries = 10\r\n\twhile not os.path.exists(downloaded_file + file_suffix) and tries <= max_tries:\r\n\t\ttry:\r\n\t\t\turllib.request.urlretrieve(url_for_assembly_file, downloaded_file + file_suffix)\r\n\t\texcept Exception as err:\r\n\t\t\tif url_for_assembly_file.endswith(\"fa.gz\"):\r\n\t\t\t\tfile_suffix = \".2bit\"\r\n\t\t\t\ttry:\r\n\t\t\t\t\turllib.request.urlretrieve(url_for_assembly_file[:-6] + file_suffix, downloaded_file + file_suffix)\r\n\t\t\t\texcept:\r\n\t\t\t\t\tfile_suffix = \".fa.gz\"\r\n\t\t\t\t\ttime.sleep(60)\r\n\t\t\t\t\ttries += 1\r\n\r\n\tif not os.path.exists(downloaded_file + file_suffix): # loop has ended with no results\r\n\t\tdownload_error(assembly_dirpath)\r\n\r\n\ttry:\r\n\t\tif file_suffix == \".2bit\":\r\n\t\t\t# transform twobit to fa.gz\r\n\t\t\tos.system(TWOBITTOFA_EXE + \" \" + downloaded_file + file_suffix + \" \" + fa_file)\r\n\t\t\tos.remove(downloaded_file + \".2bit\")\r\n\t\telif file_suffix == \".fa.gz\" or file_suffix == \".fna.gz\":\r\n\t\t\t# unzip gzip (to rezip with bgzip)\r\n\t\t\tos.system(\"gunzip \" + downloaded_file + file_suffix)\r\n\r\n\texcept:\r\n\t\tset_error_message(\"Failed to extract assembly fasta file.\")\r\n\t\traise RuntimeError(\"Failed to extract assembly fasta file.\")\r\n\r\n\treturn fa_file #returns the path of the downloaded fa file\r",
"def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)",
"def download_rna_seq(rna_seq_uuid_list, dirpath):\n data_dict = {}\n data_dict[\"ids\"] = rna_seq_uuid_list\n\n headers = {'Content-Type': 'application/json'}\n data = json.dumps(data_dict)\n\n try:\n response = requests.post('https://api.gdc.cancer.gov/data', headers=headers, data=data)\n filename = os.path.join(dirpath,response.headers[\"Content-Disposition\"].split(\"filename=\")[1])\n\n with open(filename, \"wb\") as file:\n file.write(response.content)\n file.close()\n return filename\n except:\n return None",
"def dascasi_download():\n p = argparse.ArgumentParser(description=\"download DASC all-sky camera data\")\n p.add_argument(\"site\", choices=[\"EAA\", \"FYU\", \"KAK\", \"PKR\", \"TOO\", \"VEE\"])\n p.add_argument(\n \"startend\", help=\"start/end times UTC e.g. 2012-11-03T06:23 2012-11-03T07\", nargs=2\n )\n p.add_argument(\"odir\", help=\"directory to write downloaded FITS to\")\n p.add_argument(\"-w\", \"--wavelen\", help=\"request specific wavelength(s)\", nargs=\"+\")\n p.add_argument(\"-host\", default=\"ftp://optics.gi.alaska.edu\")\n p = p.parse_args()\n\n # host = \"ftp://mirrors.arsc.edu/AMISR/PKR/DASC/RAW/\"\n download(p.startend, p.site, p.odir, p.host, p.wavelen)",
"def download(remove_archive):\n download_url = requests.get('https://cloud-api.yandex.net/v1/disk/public/resources/download', params={\n 'public_key': 'https://yadi.sk/d/TX5k2hkEm9wqZ',\n 'path': '/classification/rtsd-r3.tar.lzma'\n }).json()['href']\n\n archive_path = Path('./rtsd-r3.tar.lzma')\n with open(archive_path, 'wb') as file:\n archive_ = requests.get(download_url).content\n file.write(archive_)\n\n extract_to = Path('./datasets')\n extract_to.mkdir(parents=True, exist_ok=True)\n shutil.unpack_archive(archive_path, extract_to, format='xztar')\n os.remove(extract_to / 'rtsd-r3/.crop.swp')\n if (remove_archive):\n os.remove(archive_path)",
"def download_binary(self):\n self.archive = download_file(self.download_dir, self.client_version)",
"def _download_metafile(dataset, path=None):\n if not path:\n path = sunpy.config.get('downloads', 'sample_dir')\n base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'\n fname = dataset.lower() + '_00000000_v01.cdf'\n url = base_url + fname\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)\n return downloaded_file",
"def elar_download(bundle_id, phpsessid, extension):\n\n # check for validity of ID\n try:\n soasID = bundle_id.split(\"oai:soas.ac.uk:\")[1]\n except IndexError: # bundle_id does not start with oai:soas.ac.uk:, so we are not interested\n print(\"not a SOAS file\", soasID)\n return\n # prepare request\n url = \"https://elar.soas.ac.uk/Record/%s\" % soasID\n cookies = {\"PHPSESSID\": phpsessid}\n print(\"checking\", url)\n # retrieve catalog page\n with requests.Session() as s:\n r = s.post(url, cookies=cookies)\n html = r.text\n # extract links to ELAN files\n try:\n links = fromstring(html).findall(\".//tbody/tr/td/a\")\n locations = {\n a.attrib[\"href\"] for a in links if a.attrib[\"href\"].endswith(extension)\n }\n except AttributeError: # not an ELAN file\n print(\"files are not accessible\")\n return\n # dowload identified files\n if locations == []:\n print(\"files are not accessible\")\n return\n for location in locations:\n download_url = location\n bs = location.split(\"/\")[-1].split('-b-')\n if len(bs) == 1:\n collectionname = 'no_collection'\n basename = bs[0]\n else:\n collectionname = bs[0]\n basename = '-b-'.join(bs[1:])\n filepath = os.path.join('elar', collectionname, basename)\n if len(filepath) > 150:\n filepath = os.path.join('elar', collectionname, \"%s.%s\" % (hash(basename[:-4]),extension))\n print(\" downloading %s as %s:\" % (location, filepath))\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n save_file(s, filepath, download_url, cookies)",
"def download(date_array, tag, sat_id, data_path=None, user=None, password=None):\n import subprocess\n \n # currently passes things along if no user and password supplied\n # need to do this for testing\n # TODO, implement user and password values in test code\n # specific to DMSP\n if user is None:\n print ('No user information supplied for download.')\n user = 'pysat_testing'\n if password is None:\n print ('Please provide email address in password field.')\n password = 'pysat_testing@not_real_email.org'\n\n a = subprocess.check_output([\"globalDownload.py\", \"--verbose\", \n \"--url=http://cedar.openmadrigal.org\",\n '--outputDir='+data_path,\n '--user_fullname='+user,\n '--user_email='+password,\n '--user_affiliation=pysat',\n '--format=hdf5',\n '--startDate='+date_array[0].strftime('%m/%d/%Y'),\n '--endDate='+date_array[-1].strftime('%m/%d/%Y'),\n '--inst=8100',\n '--kindat='+str(madrigal_tag[sat_id][tag])])\n print ('Feedback from openMadrigal ', a)",
"def s1PepsDownload(argumentList):\n\n url = argumentList[0]\n fileName = argumentList[1]\n uname = argumentList[2]\n pword = argumentList[3]\n\n downloaded = False\n \n while downloaded is False:\n \n # get first response for file Size\n response = requests.get(url, stream=True, auth=(uname, pword))\n \n # get download size\n totalLength = int(response.headers.get('content-length', 0))\n \n # define chunksize\n chunkSize = 1024\n \n # check if file is partially downloaded\n if os.path.exists(fileName):\n \n firstByte = os.path.getsize(fileName)\n if firstByte == totalLength:\n print(' INFO: {} already downloaded.'.format(fileName))\n else:\n print(' INFO: Continue downloading scene to: {}'.format(fileName))\n \n else:\n print(' INFO: Downloading scene to: {}'.format(fileName))\n firstByte = 0\n \n if firstByte >= totalLength:\n return totalLength\n \n # get byte offset for already downloaded file\n header = {\"Range\": \"bytes={}-{}\".format(firstByte, totalLength)}\n response = requests.get(url, headers=header, stream=True, auth=(uname, pword))\n \n # actual download\n with open(fileName, \"ab\") as f:\n \n if totalLength is None:\n f.write(response.content)\n else:\n pbar = tqdm.tqdm(total=totalLength, initial=firstByte, unit='B',\n unit_scale=True, desc=' INFO: Downloading: ')\n for chunk in response.iter_content(chunkSize):\n if chunk:\n f.write(chunk)\n pbar.update(chunkSize)\n pbar.close()\n \n # zipFile check\n print(' INFO: Checking the zip archive of {} for inconsistency'.format(fileName))\n zipArchive = zipfile.ZipFile(fileName)\n zipTest = zipArchive.testzip()\n \n # if it did not pass the test, remove the file \n # in the while loop it will be downlaoded again\n if zipTest is not None:\n print(' INFO: {} did not pass the zip test. Re-downloading the full scene.'.format(fileName))\n os.remove(fileName)\n # otherwise we change the status to True\n else:\n print(' INFO: {} passed the zip test.'.format(fileName))\n downloaded = True",
"def download_from_archive(filename, sub_path='raw_files', env_var='DRAGONS_TEST'):\n # Find cache path and make sure it exists\n root_cache_path = os.getenv(env_var)\n\n if root_cache_path is None:\n raise ValueError(f'Environment variable not set: {env_var}')\n\n root_cache_path = os.path.expanduser(root_cache_path)\n\n if sub_path is not None:\n cache_path = os.path.join(root_cache_path, sub_path)\n\n if not os.path.exists(cache_path):\n os.makedirs(cache_path)\n\n # Now check if the local file exists and download if not\n local_path = os.path.join(cache_path, filename)\n if not os.path.exists(local_path):\n tmp_path = download_file(URL + filename, cache=False)\n shutil.move(tmp_path, local_path)\n\n # `download_file` ignores Access Control List - fixing it\n os.chmod(local_path, 0o664)\n\n return local_path",
"def _download_and_extract(self) -> None:\n\n # To be implemented here, the code to download from self._archive_url and to extract the\n # data into the self._path. This is the code for the case \"b\".\n print(\"Raw dataset downloaded and extracted.\")",
"def _download_karakas():\n #url = 'http://zenodo.org/record/12800/files/dartmouth.h5'\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FMNRAS%2F403%2F1413'\n import urllib\n print('Downloading Karakas 2010 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
maps reads (bowtie to rRNA for legacy?) to extract ambiguous and uniquely mapped reads | def map_reads(SRA):
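# assumes TMP_DIR, BOWTIE_DIR and STAR_DIR are module-level path constants and that subprocess is imported elsewhere in this pipeline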
#1. bowtie to rRNA
print("Bowtie alignement on contaminant RNA...")
cmd_bowtie = 'bowtie'+ ' ' + '-a' + ' ' + '-p6' + ' ' + '-S' + ' ' + '--un' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + BOWTIE_DIR+'/rRNA' + ' ' + TMP_DIR+SRA+'_trimmed.fastq' + ' ' + '|' + ' ' + 'samtools view -@ 6 -bS' + ' ' + '>' + TMP_DIR+SRA+'_trimmed_rrna.bam'
output = subprocess.run(cmd_bowtie, shell=True)
# 2. STAR to ref genome
print("STAR alignement to yeast genome...")
cmd_STAR = 'STAR --outSAMtype BAM Unsorted --runThreadN 6 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+SRA+'_STAR_'
output = subprocess.run(cmd_STAR, shell=True)
# 3. Samtools keep uniquely mapped reads and sort
print("Samtools to keep uniquely mapped reads and sort...")
cmd_samtools1 = 'samtools view -@ 6 -b -q 255 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam' + ' ' + TMP_DIR+SRA+'_STAR_Aligned.out.bam'
output = subprocess.run(cmd_samtools1, shell=True)
cmd_samtools2 = 'samtools sort -@ 6 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam'
output = subprocess.run(cmd_samtools2, shell=True)
cmd_samtools3 = 'samtools index' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam'
output = subprocess.run(cmd_samtools3, shell=True) | [
"def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if self.coverage[ref_alignment_start] > 300:\n return False\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n read_quality = read.query_qualities\n ref_sequence = self.fasta_handler.get_sequence(chromosome_name=self.chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop+10)\n\n self.read_info[read_id] = (ref_alignment_start, ref_alignment_stop, read.mapping_quality, read.is_reverse)\n for pos in range(ref_alignment_start, ref_alignment_stop):\n self.read_id_by_position[pos].append((read_id, ref_alignment_start, ref_alignment_stop))\n for i, ref_base in enumerate(ref_sequence):\n self.reference_dictionary[ref_alignment_start + i] = ref_base\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n for cigar in cigar_tuples:\n cigar_code = cigar[0]\n length = cigar[1]\n # get the sequence segments that are effected by this operation\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n read_quality_segment = read_quality[read_index:read_index+length]\n read_sequence_segment = read_sequence[read_index:read_index+length]\n\n if cigar_code != 0 and found_valid_cigar is False:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment = \\\n self.parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n alignment_position=ref_alignment_start+ref_index,\n ref_sequence=ref_sequence_segment,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n quality=read_quality_segment)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n # after collecting all alleles from reads, update the global dictionary\n for position in self.read_allele_dictionary.keys():\n if position < self.region_start_position or position > self.region_end_position:\n continue\n self.rms_mq[position] += read.mapping_quality * read.mapping_quality\n for record in self.read_allele_dictionary[position]:\n # there can be only one record per position in a read\n allele, allele_type = record\n\n if allele_type == MATCH_ALLELE or allele_type == MISMATCH_ALLELE:\n # If next allele is indel then group it with the current one, don't make a separate one\n if position + 1 <= ref_alignment_stop and position + 1 in self.read_allele_dictionary.keys():\n next_allele, next_allele_type = list(self.read_allele_dictionary[position + 1].keys())[0]\n if next_allele_type == INSERT_ALLELE or next_allele_type == DELETE_ALLELE:\n continue\n self.positional_read_info[position].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position, allele, allele_type,\n read.mapping_quality)\n else:\n # it's an insert or delete, so, add to the previous position\n self.positional_read_info[position-1].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position-1, allele, allele_type,\n read.mapping_quality)\n return True",
"def get_base_map(read):\n base_map = {}\n read_coords, ref_coords = get_coords(read)\n for read_coord, ref_coord, read_base in zip(read_coords, ref_coords, read.seq):\n base_map[ref_coord] = (read_base, read_coord)\n return base_map",
"def map_reads_2genes(self, reads_file):\n start1 = time()\n read_starts = self.__get_reads_pos(reads_file)\n start2 = time()\n times = 0\n for ref_gene in self.ref_genes:\n times += 1\n if times % 500 == 0:\n print 'calculated %d genes read count ...' % times\n if len(read_starts[ref_gene.chrom]) == 0:\n continue\n starts = read_starts[ref_gene.chrom]\n for es, ed in zip(ref_gene.exon_starts, ref_gene.exon_ends):\n # rd = starts[(starts > es) & (starts < ed)].size\n rd = cal_read_count(es, ed, starts)\n ref_gene.read_count += rd\n\n print 'start calculate rpkm ...'\n mapped_read_count = self.mapped_read_count\n for ref_gene in self.ref_genes:\n # calculate RPKM\n ref_gene.read_density = \\\n ref_gene.read_count * 1000 * 1000 * 1000. / (ref_gene.mRNA_length * mapped_read_count)\n print 'got reads time: %f' % (time() - start1)\n print 'map reads time: %f' % (time() - start2)",
"def determine_crossmapped_reads(self, read_alignment_path):\n references_by_species = self._get_references_by_species()\n crossmapped_reads = set()\n done_replicon_comparison = []\n with pysam.AlignmentFile(read_alignment_path) as bam:\n for org, replicon_ids in references_by_species.items():\n for replicon_id in replicon_ids:\n self._read_ids = set()\n # First, collect the ids of the aligned reads of\n # this replicon\n for alignment in bam.fetch(reference=replicon_id):\n self._read_ids.add(alignment.qname)\n # Then compare them to the alignments of each\n # replicon of the other organism(s)\n for (\n comp_org,\n comp_replicon_ids,\n ) in references_by_species.items():\n # Only compare replicons of different species\n if org == comp_org:\n continue\n for comp_replicon_id in comp_replicon_ids:\n comparison = sorted([replicon_id, comp_replicon_id])\n # Check if comparison of the two replicons\n # has been done already\n if comparison in done_replicon_comparison:\n continue\n done_replicon_comparison.append(comparison)\n # Compare all read ids of the comparison\n # replicon to the query replicon read ids\n for alignment in bam.fetch(\n reference=comp_replicon_id\n ):\n if alignment.qname in self._read_ids:\n crossmapped_reads.add(alignment.qname)\n no_of_crossmapped_reads = len(crossmapped_reads)\n return crossmapped_reads",
"def mapping(reads_list, k, h, index, genome):\n snps_dict = {}\n # Map the read on the genome and store the snps found\n for read in reads_list:\n reversed_read = reverse_read(read)\n reverse = False\n list_mapping = seed_and_extend(read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = False\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on straight strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n list_mapping = seed_and_extend(reversed_read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = True\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on reverse strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n reverse = False\n if VERBOSE:\n print(\"No mapping found for read number :\", reads_list.index(read) + 1)\n if list_mapping[0] < len(genome):\n for mismatch in list_mapping[2]:\n if reverse == False:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [read[mismatch - list_mapping[0]]]\n else:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(reversed_read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [reversed_read[mismatch - list_mapping[0]]]\n\n return snps_dict",
"def parse_reads_and_select_candidates(self, reads):\n st_time = time.time()\n # read_id_list = []\n total_reads = 0\n read_unique_id = 0\n for read in reads:\n # check if the read is usable\n if read.mapping_quality >= DEFAULT_MIN_MAP_QUALITY and read.is_secondary is False \\\n and read.is_supplementary is False and read.is_unmapped is False and read.is_qcfail is False:\n\n read.query_name = read.query_name + '_' + str(read_unique_id)\n if self.find_read_candidates(read=read):\n # read_id_list.append(read.query_name)\n total_reads += 1\n read_unique_id += 1\n\n if total_reads == 0:\n return []\n\n selected_allele_list = []\n postprocess_read_id_list = set()\n for pos in self.positional_allele_dictionary:\n if pos < self.region_start_position or pos > self.region_end_position:\n continue\n ref = self.reference_dictionary[pos]\n\n all_allele_dictionary = self.positional_allele_dictionary[pos]\n all_mismatch_count = 0\n for allele in all_allele_dictionary:\n all_mismatch_count += all_allele_dictionary[allele]\n\n # pick the top 2 most frequent allele\n allele_frequency_list = list(sorted(all_allele_dictionary.items(), key=operator.itemgetter(1, 0),\n reverse=True))[:PLOIDY]\n allele_list = self._filter_alleles(pos, allele_frequency_list)\n alt1 = allele_list[0] if len(allele_list) >= 1 else None\n alt2 = allele_list[1] if len(allele_list) >= 2 else '.'\n if alt1 is None:\n continue\n mq_rms = round(math.sqrt(self.rms_mq[pos]/self.coverage[pos]), 3) if self.coverage[pos] > 0 else 0\n dp = self.coverage[pos]\n ref_count = self.coverage[pos] - all_mismatch_count\n candidate_record = [self.chromosome_name] + self._get_record(pos, alt1, alt2, ref, ref_count) + [mq_rms] + [dp]\n postprocess_read_id_list.update(self.read_id_by_position[pos])\n selected_allele_list.append(candidate_record)\n\n postprocess_read_id_list = list(postprocess_read_id_list)\n if len(selected_allele_list) > 0:\n self.postprocess_reference()\n self.postprocess_reads(postprocess_read_id_list)\n\n return selected_allele_list",
"def find_read_candidates(self, read):\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n ref_sequence = self.fasta_handler.get_sequence(chromosome_name=self.chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop)\n\n for i, ref_base in enumerate(ref_sequence):\n self._update_reference_dictionary(ref_alignment_start + i, ref_base)\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n\n for cigar in cigar_tuples:\n cigar_code = cigar[0]\n length = cigar[1]\n # get the sequence segments that are effected by this operation\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n read_sequence_segment = read_sequence[read_index:read_index+length]\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment, candidate_positions = \\\n self.parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n alignment_position=ref_alignment_start+ref_index,\n ref_sequence=ref_sequence_segment,\n read_sequence=read_sequence_segment,\n read_name=read.query_name)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n for pos in candidate_positions:\n if self.region_start_position <= pos <= self.region_end_position:\n percent_mismatch = int((self.mismatch_count[pos]*100) / self.coverage[pos])\n if self.mismatch_count[pos] > MIN_MISMATCH_THRESHOLD and \\\n percent_mismatch > MIN_MISMATCH_PERCENT_THRESHOLD and \\\n self.coverage[pos] > MIN_COVERAGE_THRESHOLD:\n yield pos",
"def combine_mappings(fasta_fh, mapping_fh, denoised_seqs_fh, otu_picker_otu_map_fh, out_dir):\n\n #read in mapping from split_library file\n labels = imap(lambda (a,b): a, MinimalFastaParser(fasta_fh))\n #mapping from seq_id to sample_id\n sample_id_mapping = extract_read_to_sample_mapping(labels)\n\n denoiser_mapping = read_denoiser_mapping(mapping_fh)\n #read in cd_hit otu map\n # and write out combined otu_picker+denoiser map \n otu_fh = open(out_dir+\"/denoised_otu_map.txt\",\"w\")\n for otu_line in otu_picker_otu_map_fh:\n otu_split = otu_line.split()\n \n otu = otu_split[0]\n ids = otu_split[1:]\n \n get_sample_id = sample_id_mapping.get\n #concat lists\n #make sure the biggest one is first for pick_repr\n all_ids = sort_ids(ids, denoiser_mapping)\n all_ids.extend(sum([denoiser_mapping[id] for id in ids], []))\n try:\n otu_fh.write(\"%s\\t\" % otu +\n \"\\t\".join(map(get_sample_id, all_ids))+\"\\n\")\n except TypeError:\n #get returns Null if denoiser_mapping id not present in sample_id_mapping\n print \"Found id in denoiser output, which was not found in split_libraries \"+\\\n \"output FASTA file. Wrong file?\"\n exit()\n\n fasta_out_fh = open(out_dir+\"/denoised_all.fasta\",\"w\")\n for label, seq in MinimalFastaParser(denoised_seqs_fh):\n id = label.split()[0]\n newlabel = \"%s %s\" %(sample_id_mapping[id], id)\n fasta_out_fh.write(Sequence(name= newlabel, seq=seq).toFasta()+\"\\n\")",
"def map_reads(self):\n self.settings.write_to_log('mapping reads')\n if not self.settings.get_property('force_remapping'):\n for lib_settings in self.settings.iter_lib_settings():\n if not lib_settings.mapped_reads_exist():\n break\n else:\n return\n ribo_utils.make_dir(self.rdir_path('mapped_reads'))\n map(lambda lib_setting: self.map_one_library(lib_setting, self.threads), self.settings.iter_lib_settings())\n self.settings.write_to_log( 'finished mapping reads')",
"def test_extract_read_to_sample_mapping(self):\r\n\r\n labels = [\r\n 'S160_1 E86FECS01DW5V4 orig_bc=CAGTACGATCTT new_bc=CAGTACGATCTT bc_diffs=0',\r\n 'S160_2 E86FECS01DW5V5 orig_bc=CAGTACGATCTT new_bc=CAGTACGATCTT bc_diffs=0']\r\n\r\n expected = {'E86FECS01DW5V4': 'S160_1',\r\n 'E86FECS01DW5V5': 'S160_2'}\r\n\r\n self.assertEqual(extract_read_to_sample_mapping(labels),\r\n expected)",
"def align_reads(ref=None, reads=None, out_file=None, mapper='bwa'):\n if mapper == 'smalt':\n cml = 'smalt index -k 7 -s 2 cnsref %s' % shlex.quote(ref)\n subprocess.call(shlex.split(cml))\n cml = 'smalt map -n %d -o hq_2_cons.sam -x -c 0.8 -y 0.8 cnsref %s' %\\\n (min(12, CPUS), reads)\n subprocess.call(shlex.split(cml))\n elif mapper == 'bwa':\n cml = 'bwa index -p cnsref %s' % shlex.quote(ref)\n subprocess.call(shlex.split(cml))\n cml = 'bwa mem -t %d -O 12 cnsref %s > hq_2_cons.sam' %\\\n (min(12, CPUS), reads)\n subprocess.call(cml, shell=True)\n elif mapper == 'novo':\n cml = 'novoindex cnsref.ndx %s' % ref\n subprocess.call(cml, shell=True)\n cml = 'novoalign -d cnsref.ndx -f %s -F STDFQ -o SAM > hq_2_cons.sam' \\\n % reads\n subprocess.call(cml, shell=True)\n\n cml = \\\n 'samtools view -Su hq_2_cons.sam | samtools sort -T /tmp -@ %d -o %s -'\\\n % (min(4, CPUS), out_file)\n subprocess.call(cml, shell=True)\n cml = 'samtools index %s' % out_file\n subprocess.call(cml, shell=True)\n os.remove('hq_2_cons.sam')\n\n return out_file",
"def look_for_read_in_sim(read, sim_info):\n\t\n\tsim_ints = {}\n\t\n\n\t# look through rows of sim info for matches\n\tfor sim_row in sim_info:\n\t\t\n\t\t# look in chimeric\n\t\tif read['merged']:\n\t\t\t\n\t\t\t# if read was merged, we just want to look for either read 1 or 2 annotated as chimeric\n\t\t\tfor annotated_read in sim_row['left_chimeric'].split(\";\"):\n\t\t\t\tif re.match(f\"{read['qname']}/\", annotated_read):\n\t\t\t\t\tsim_ints[f\"{sim_row['id']}_left_chimeric\"] = sim_row\n\t\t\t\t\t\n\t\t\tfor annotated_read in sim_row['right_chimeric'].split(\";\"):\n\t\t\t\tif re.match(f\"{read['qname']}/\", annotated_read):\n\t\t\t\t\tsim_ints[f\"{sim_row['id']}_right_chimeric\"] = sim_row\n\t\t\t\t\n\t\telse:\n\t\t\t# if read wasn't merged, check for this specific read number\n\t\t\tif f\"{read['qname']}/{read['num']}\" in sim_row['left_chimeric'].split(\";\"):\n\t\t\t\tsim_ints[f\"{sim_row['id']}_left_chimeric\"] = sim_row\n\t\t\n\t\t\tif f\"{read['qname']}/{read['num']}\" in sim_row['right_chimeric'].split(\";\"):\n\t\t\t\tsim_ints[f\"{sim_row['id']}_right_chimeric\"] = sim_row\n\t\t\t\n\t\t# look in discordant\n\t\tif read['qname'] in sim_row['left_discord'].split(\";\"):\n\t\t\tsim_ints[f\"{sim_row['id']}_left_discord\"] = sim_row\n\t\t\t\n\t\tif read['qname'] in sim_row['right_discord'].split(\";\"):\n\t\t\tsim_ints[f\"{sim_row['id']}_right_discord\"] = sim_row\n\t\t\t\n\treturn sim_ints",
"def _read_pyMatch(fn, precursors):\n with open(fn) as handle:\n reads = defaultdict(realign)\n for line in handle:\n query_name, seq, chrom, reference_start, end, mism, add = line.split()\n reference_start = int(reference_start)\n # chrom = handle.getrname(cols[1])\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n if query_name not in reads:\n reads[query_name].sequence = seq\n iso = isomir()\n iso.align = line\n iso.start = reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], reference_start)\n logger.debug(\"%s %s %s %s %s\" % (query_name, reference_start, chrom, iso.subs, iso.add))\n if len(iso.subs) > 1:\n continue\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads",
"def read_ncbi_mapping(bam_fn, gff3_fn):\n\n ret = {}\n\n # Read strand, coding_start and coding_end from GFF3 file\n gff3_data = process_gff3_file(gff3_fn)\n\n for line in pysam.view(bam_fn):\n line = line.strip()\n cols = line.split()\n\n id = cols[0]\n if '.' in id:\n id = id[:id.find('.')]\n\n if not id.startswith('NM_'):\n continue\n\n # Extract chromosome name\n chrom_nc = cols[2]\n if not chrom_nc.startswith('NC_'):\n continue\n if '.' in chrom_nc:\n chrom_nc = chrom_nc[:chrom_nc.find('.')]\n chrom = translate_chromosome_name_from_nc_id(chrom_nc)\n\n start_pos = int(cols[3]) - 1\n\n # Process cigar string and calculate exon coordinates\n cigar = cols[5]\n cigar_list = split_cigar(cigar)\n exon_lengths, intron_lengths, exon_cigars = break_into_exons(cigar_list)\n exon_starts, exon_ends = calculate_exon_coordinates(exon_lengths, intron_lengths, start_pos)\n\n # Extract strand, coding_start, coding_end from the GFF3 data\n # Note: the followings are not correct in case of multiple mappings, however transcripts with multiple mappings\n # are excluded anyway. If transcripts with multiple mappings are included in the future, the code should match the\n # corresponding mappings read from the two different sources and not always add data from the 0th mapping.\n strand = gff3_data[id][0][0]\n coding_start = gff3_data[id][0][1]\n coding_end = gff3_data[id][0][2]\n\n # Reverse order of exon coordinates for reverse-stranded transcripts\n if strand == '-':\n exon_starts = exon_starts[::-1]\n exon_ends = exon_ends[::-1]\n exon_cigars = exon_cigars[::-1]\n\n mapping = {\n 'chrom': chrom,\n 'strand': strand,\n 'exonStarts': exon_starts,\n 'exonEnds': exon_ends,\n 'coding_start': coding_start,\n 'coding_end': coding_end,\n 'exon_cigars': exon_cigars\n }\n\n if id not in ret:\n ret[id] = []\n ret[id].append(mapping)\n\n return ret",
"def extract_read_to_sample_mapping(labels):\n sample_id_mapping = {}\n\n re = compile(r'(\\S+) (\\S+)')\n for label in labels:\n tmatch = search(re, label)\n sample_id = tmatch.group(1)\n flowgram_id = tmatch.group(2)\n sample_id_mapping[flowgram_id] = sample_id\n \n return sample_id_mapping",
"def extract_reads(reads, list, out):\n record_dict = SeqIO.index(reads, \"fasta\")\n with open(out, \"wb\") as output_handle, open(list, \"r\") as ID:\n for entry in ID:\n entry = entry.replace(\"\\n\", \"\")\n output_handle.write(record_dict.get_raw(entry))",
"def combine_mappings(fasta_fh, mapping_fh, denoised_seqs_fh,\r\n otu_picker_otu_map_fh, out_dir):\r\n\r\n # read in mapping from split_library file\r\n labels = imap(lambda a_b: a_b[0], parse_fasta(fasta_fh))\r\n # mapping from seq_id to sample_id\r\n sample_id_mapping = extract_read_to_sample_mapping(labels)\r\n\r\n denoiser_mapping = read_denoiser_mapping(mapping_fh)\r\n # read in cd_hit otu map\r\n # and write out combined otu_picker+denoiser map\r\n otu_fh = open(out_dir + \"/denoised_otu_map.txt\", \"w\")\r\n for otu_line in otu_picker_otu_map_fh:\r\n otu_split = otu_line.split()\r\n\r\n otu = otu_split[0]\r\n ids = otu_split[1:]\r\n\r\n get_sample_id = sample_id_mapping.get\r\n # concat lists\r\n # make sure the biggest one is first for pick_repr\r\n all_ids = sort_ids(ids, denoiser_mapping)\r\n all_ids.extend(sum([denoiser_mapping[id] for id in ids], []))\r\n try:\r\n otu_fh.write(\"%s\\t\" % otu +\r\n \"\\t\".join(map(get_sample_id, all_ids)) + \"\\n\")\r\n except TypeError:\r\n # get returns Null if denoiser_mapping id not present in\r\n # sample_id_mapping\r\n print \"Found id in denoiser output, which was not found in split_libraries \" +\\\r\n \"output FASTA file. Wrong file?\"\r\n exit()\r\n\r\n fasta_out_fh = open(out_dir + \"/denoised_all.fasta\", \"w\")\r\n for label, seq in parse_fasta(denoised_seqs_fh):\r\n id = label.split()[0]\r\n newlabel = \"%s %s\" % (sample_id_mapping[id], id)\r\n fasta_out_fh.write(BiologicalSequence(seq, id=newlabel).to_fasta())",
"def extract_read_to_sample_mapping(labels):\r\n sample_id_mapping = {}\r\n\r\n re = compile(r'(\\S+) (\\S+)')\r\n for label in labels:\r\n tmatch = search(re, label)\r\n sample_id = tmatch.group(1)\r\n flowgram_id = tmatch.group(2)\r\n sample_id_mapping[flowgram_id] = sample_id\r\n\r\n return sample_id_mapping",
"def remap_to_genome(align, annot, mates = None):\n # if a read is unaligned retrun the same SAM line\n if align.flag & 4:\n return align.to_string()\n\n if align.reference_name not in annot:\n raise ValueError(f'{align.reference_name} not present in annotation')\n\n transcript = annot[align.reference_name]\n\n flag = align.flag\n sequence = align.query_sequence\n if transcript.strand == '-':\n flag ^= 16\n sequence = reverse_complement(sequence)\n\n exon_lens = [e.end - e.start + 1 for e in transcript.exons]\n\n # alignment of read to transcript left-most position \n align_start = align.reference_start + 1\n cigartuples = align.cigartuples\n\n # if transcript is annotated on the negative negative strand of the genome\n # go from the end of the read alignment to the transcript:\n # find alignment position from sequence end and reverse CIGAR\n if transcript.strand == '-':\n align_start = sum(exon_lens) - get_alignment_end(align) + 1\n cigartuples = reversed(cigartuples)\n\n # find read alignment to the genome start position\n cum_exon_lens = [sum(exon_lens[:i + 1]) for i in range(0, len(exon_lens))]\n ind = bisect.bisect_left(cum_exon_lens, align_start)\n if ind > 0 and cum_exon_lens[ind] == align_start:\n ind += 1\n if ind == 0:\n start = transcript.exons[ind].start + align_start - 1\n else:\n start = transcript.exons[ind].start + align_start - 1 - \\\n cum_exon_lens[ind - 1]\n\n op2cigar = {sam.CIGAR_MATCH: 'M', sam.CIGAR_INSERTION: 'I',\n sam.CIGAR_DELETION: 'D', sam.CIGAR_SOFT_CLIP: 'S'}\n\n # generate new CIGAR\n cigar = ''\n g_pos = start\n for op, num in cigartuples:\n if op == sam.CIGAR_INTRON:\n raise RuntimeError('Bad alignment')\n\n if op in [sam.CIGAR_MATCH, sam.CIGAR_DELETION]:\n\n bases_left = num\n while bases_left > 0:\n\n # if rthe current alignment segment ends within current exon\n if g_pos + bases_left <= transcript.exons[ind].end:\n cigar += f'{bases_left}{op2cigar[op]}'\n g_pos += bases_left\n break\n else:\n\n # if the current alignment segment spans an intron\n exon_bases = transcript.exons[ind].end - g_pos + 1\n bases_left -= exon_bases\n g_pos += exon_bases\n cigar += f'{exon_bases}{op2cigar[op]}'\n if bases_left > 0:\n intron = transcript.exons[ind + 1].start - transcript.exons[ind].end - 1\n cigar += f'{intron}N'\n g_pos += intron\n ind += 1\n\n elif op in [sam.CIGAR_INSERTION, sam.CIGAR_SOFT_CLIP]:\n cigar += f'{num}{op2cigar[op]}'\n else:\n raise ValueError(f'Unsupported CIGAR operation: {op}')\n\n # compute mate start for paired reads\n mate_name = '*'\n mate_start = 0\n if align.flag & 1 and align.next_reference_name is not None :\n if align.next_reference_name == align.reference_name:\n mate_name = '='\n mate_start = transcript2genome([align.next_reference_start], align,\n transcript)\n\n else:\n # get alignment start for a new transcript\n mate_transcript = annot[align.next_reference_name]\n mate_name = annot[align.next_reference_name].seqid\n\n mate_align = mates[(align.next_reference_name,\n align.next_reference_start)]\n\n mate_start = transcript2genome([align.next_reference_start],\n mate_align,\n mate_transcript)\n\n\n result = f'{align.query_name}\\t{flag}\\t{annot[align.reference_name].seqid}'\\\n f'\\t{start}\\t255\\t{cigar}\\t{mate_name}\\t{mate_start}\\t0' \\\n f'\\t{sequence}\\t*'\\\n f'\\tNH:i:{align.get_tag(\"NH\")}\\tAS:i:{align.get_tag(\"AS\")}'\\\n f'\\tNM:i:{align.get_tag(\"NM\")}'\n\n return result"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
wrapper to run scikit-ribo from the same pipeline; requires a local install of the modified scikit-ribo toolbox and of all scikit-ribo environment dependencies (see conda environment file) | def run_scikit_ribo(SRA, genome_fasta, genome_gtf):
# 3. Scikit-ribo index
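# assumes SCIKIT_PATH (path to the local scikit-ribo scripts) and SCIKIT_DIR (index output folder) are module-level constants defined elsewhere in this pipeline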
print("Building scikit-ribo index")
if not os.path.exists(SCIKIT_DIR):
os.mkdir(SCIKIT_DIR)
cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'scikit-ribo-build.py' + ' ' + '-g' + ' ' + genome_gtf + ' ' + '-f' + ' ' + genome_fasta + ' ' + '-p' + ' ' + SRA + ' ' + '-o' + ' ' + SCIKIT_DIR
output = subprocess.run(cmd_scikit, shell=True)
print("scikit-ribo-run.py...")
cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'scikit-ribo-run.py' + ' ' + '-i' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + '-f' + ' ' + SCIKIT_DIR + ' ' + '-p' + ' ' + SRA + ' ' + '-o' + ' ' + TMP_DIR+'scikit_'+SRA
output = subprocess.run(cmd_scikit, shell=True)
print("plot_ribo_density_dict.py...")
cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'plot_ribo_density_dict_noCDT.py' + ' ' + '-i' + ' ' + TMP_DIR+'scikit_'+SRA+'/riboseq_input.txt' + ' ' + '-g' + ' ' + 'all' + ' ' + '-o' + ' ' + TMP_DIR+'scikit_'+SRA #+'_profiles'
output = subprocess.run(cmd_scikit, shell=True) | [
"def lab(session):\n session.install(\"-r\", \"requirements.txt\")\n session.run(\"jupyter\", \"lab\")",
"def test_toolchain_bootstrap(orchestra: OrchestraShim):\n # Print unsolved graph\n orchestra(\"graph\", \"-b\", \"gcc\")\n\n # Print solved graph\n orchestra(\"graph\", \"-b\", \"-s\", \"gcc\")\n\n # Install\n orchestra(\"install\", \"-b\", \"gcc\")",
"def setup(c, version=None):\n version = version or '3.8'\n suffix = '' if version == '3.8' else version.replace('.', '')\n env_name = f'sk-eval{suffix}'\n\n c.run(f'conda create --name {env_name} python={version} --yes')\n c.run('eval \"$(conda shell.bash hook)\" '\n f'&& conda activate {env_name} '\n '&& pip install --editable .[all] '\n '&& pip install invoke lxml'\n ) # lxml needed for NotebookCollection.py example\n\n print(f'Done! Activate your environment with:\\nconda activate {env_name}')",
"def install_sm_local_dependencies(framework, job_type, image, ec2_conn, ec2_instance_ami):\n python_invoker = get_python_invoker(ec2_instance_ami)\n # Install custom packages which need to be latest version\"\n # using virtualenv to avoid package conflicts with the current packages\n ec2_conn.run(f\"sudo apt-get install virtualenv -y \")\n ec2_conn.run(f\"virtualenv env --python {python_invoker}\")\n ec2_conn.run(f\"source ./env/bin/activate\")\n if framework == \"pytorch\":\n # The following distutils package conflict with test dependencies\n ec2_conn.run(\"sudo apt-get remove python3-scipy python3-yaml -y\")\n ec2_conn.run(f\"sudo {python_invoker} -m pip install -r requirements.txt \", warn=True)",
"def bioconda_setup(request: pytest.FixtureRequest) -> Tuple[Optional[int], str]:\n\n assert request.config.cache\n deps_dir = request.config.cache.get(\"bioconda_deps\", None)\n if deps_dir is not None and not Path(deps_dir).exists():\n # cache value set, but cache is gone :( ... recreate\n deps_dir = None\n\n if deps_dir is None:\n given_basetemp = request.config.option.basetemp\n if given_basetemp is not None:\n basetemp = Path(os.path.abspath(str(given_basetemp))).resolve()\n deps_dir = basetemp / \"bioconda\"\n else:\n from_env = os.environ.get(\"PYTEST_DEBUG_TEMPROOT\")\n temproot = Path(from_env or tempfile.gettempdir()).resolve()\n rootdir = temproot.joinpath(f\"pytest-of-{getuser() or 'unknown'}\")\n try:\n rootdir.mkdir(mode=0o700, exist_ok=True)\n except OSError:\n rootdir = temproot.joinpath(\"pytest-of-unknown\")\n rootdir.mkdir(mode=0o700, exist_ok=True)\n deps_dir = rootdir / \"bioconda\"\n request.config.cache.set(\"bioconda_deps\", str(deps_dir))\n\n deps_dirpath = Path(deps_dir)\n deps_dirpath.mkdir(parents=True, exist_ok=True)\n\n wflow = get_data(\"tests/seqtk_seq.cwl\")\n job = get_data(\"tests/seqtk_seq_job.json\")\n error_code, _, stderr = get_main_output(\n [\n \"--outdir\",\n str(deps_dirpath / \"out\"),\n \"--beta-conda-dependencies\",\n \"--beta-dependencies-directory\",\n str(deps_dirpath / \"deps\"),\n \"--debug\",\n wflow,\n job,\n ]\n )\n return error_code, stderr",
"def upgrade_thirdparty_tools(args, remotes):\n s = {\"fabricrc_overrides\": {\"system_install\": args.tooldir,\n \"local_install\": os.path.join(args.tooldir, \"local_install\"),\n \"distribution\": args.distribution,\n \"use_sudo\": args.sudo,\n \"edition\": \"minimal\"}}\n s = _default_deploy_args(args)\n s[\"flavor\"] = \"seqcluster_flavor\",\n s[\"actions\"] = [\"install_biolinux\"]\n s[\"fabricrc_overrides\"][\"system_install\"] = args.tooldir\n s[\"fabricrc_overrides\"][\"local_install\"] = os.path.join(args.tooldir, \"local_install\")\n cbl = get_cloudbiolinux(remotes)\n sys.path.insert(0, cbl[\"dir\"])\n cbl_deploy = __import__(\"cloudbio.deploy\", fromlist=[\"deploy\"])\n cbl_deploy.deploy(s)\n manifest_dir = os.path.join(_get_data_dir(), \"manifest\")\n print(\"Creating manifest of installed packages in %s\" % manifest_dir)\n cbl_manifest = __import__(\"cloudbio.manifest\", fromlist=[\"manifest\"])\n if os.path.exists(manifest_dir):\n for fname in os.listdir(manifest_dir):\n if not fname.startswith(\"toolplus\"):\n os.remove(os.path.join(manifest_dir, fname))\n cbl_manifest.create(manifest_dir, args.tooldir)",
"def install_VibronicToolkit():\n url = \"https://github.com/ngraymon/VibronicToolkit.jl.git\"\n package_name = \"VibronicToolkit\"\n branch_name = \"integrated\"\n\n # should add a try catch so that Julia doesn't error out if the package is already installed?\n # TODO - update this section once the standard approach is finalized and bugs are ironed out in the v1.0.X versions of Julia\n\n # This approach is functional but there is probably a better way to do this\n cmd = 'using Pkg;'\n cmd += f'Pkg.add(PackageSpec(url=\"{url:s}\", rev=\"{branch_name:s}\"));'\n subprocess.run(['julia', '-e', cmd])\n\n # we trust that the install was successful\n # although it would be nice to have a clean way to confirm the install was successful\n return",
"def work(env):\n\n macro_args(env)\n xbc.prep(env)\n env['xed_dir'] = env['src_dir']\n verify_args(env)\n start_time=mbuild.get_time()\n update_version(env)\n init_once(env)\n init(env)\n if env['setup_hooks']:\n setup_hooks(env)\n xbc.cexit(0)\n if 'clean' in env['targets'] or env['clean']:\n xbc.xed_remove_files_glob(env)\n if len(env['targets'])<=1:\n xbc.cexit(0)\n\n mbuild.cmkdir(env['build_dir'])\n mbuild.cmkdir(mbuild.join(env['build_dir'], 'include-private'))\n work_queue = mbuild.work_queue_t(env['jobs']) \n\n input_files = build_libxed(env, work_queue)\n\n env['enc2_configs'] = [] # used for installing kits\n if env['enc2']:\n configs = [ enc2_config_t(64,64), # popular\n enc2_config_t(32,32), \n #enc2_config_t(16,16), # infrequent\n #enc2_config_t(64,32), # obscure \n #enc2_config_t(32,16), # more obscure\n #enc2_config_t(16,32) # more obscure\n ]\n\n test_libs = []\n for config in configs: \n (shd_enc2,lnk_enc2, shd_chk, lnk_chk) = build_libxedenc2(env, work_queue, input_files, config)\n test_libs.append((shd_enc2, lnk_enc2, shd_chk, lnk_chk))\n env['enc2_configs'].append(config)\n legal_header_tagging(env)\n _prep_kit_dirs(env)\n create_working_kit_structure(env,work_queue) # wkit\n create_install_kit_structure(env,work_queue) # ikit\n\n build_examples(env) # in the working kit now\n _copy_examples_to_bin(env,env['wkit'])\n _copy_dynamic_libs_to_kit(env,env['wkit'])\n _test_examples(env)\n\n copy_working_kit_to_install_dir(env)\n # put the doxygen in working kit, if not installing, and the final\n # kit if installing.\n create_doxygen_api_documentation(env, work_queue)\n compress_kit(env)\n mbuild.vmsgb(1, \"XED KIT BUILD COMPLETE\")\n \n system_install(env,work_queue) # like in /usr/local/{lib,include/xed}\n make_doxygen_build(env,work_queue)\n retval = run_tests(env)\n\n end_time=mbuild.get_time()\n mbuild.vmsgb(1, \"ELAPSED TIME\", mbuild.get_elapsed_time(start_time,\n end_time))\n mbuild.vmsgb(1, \"RETVAL={}\".format(retval))\n return retval",
"def transformers_environment(use_gpu=True):\n\n pip_requirements_path = str(Path(__file__).parent.joinpath(\"requirements.txt\"))\n print(f\"Create Azure ML Environment from {pip_requirements_path}\")\n\n if use_gpu:\n\n env_name = \"transformers-gpu\"\n env = Environment.from_pip_requirements(\n name=env_name, file_path=pip_requirements_path\n )\n env.docker.base_image = (\n \"mcr.microsoft.com/azureml/intelmpi2018.3-cuda10.0-cudnn7-ubuntu16.04\"\n )\n\n else:\n\n env_name = \"transformers-cpu\"\n env = Environment.from_pip_requirements(\n name=env_name, file_path=pip_requirements_path\n )\n\n return env",
"def installRequiredPackages(self, force=False):\n # Need to install if forced or any packages cannot be imported\n needToInstall = force\n if not needToInstall:\n try:\n import jupyter\n import jupyterlab\n import ipywidgets\n import pandas\n import ipyevents\n import ipycanvas\n except:\n needToInstall = True\n\n if needToInstall:\n # Install required packages\n import os\n if os.name != 'nt':\n # PIL may be corrupted on linux, reinstall from pillow\n slicer.util.pip_install('--upgrade pillow --force-reinstall')\n\n slicer.util.pip_install(\"jupyter jupyterlab ipywidgets pandas ipyevents ipycanvas --no-warn-script-location\")\n\n # Install Slicer Jupyter kernel\n # Create Slicer kernel\n slicer.modules.jupyterkernel.updateKernelSpec()\n # Install Slicer kernel\n import jupyter_client\n jupyter_client.kernelspec.KernelSpecManager().install_kernel_spec(slicer.modules.jupyterkernel.kernelSpecPath(), user=True, replace=True)",
"def install_requirements():\n local('. fabric_factory/ve/bin/activate; easy_install pip')\n local('. fabric_factory/ve/bin/activate; pip install -r requirements.txt')",
"def setup(ctx):\n ctx.run(\"virtualenv venv && source venv/bin/activate\")\n ctx.run(\"pip install -r requirements.txt -r requirements-dev.txt\")",
"def test_engines_setup_global_args():\n\n juice_dataframe = pycaret.datasets.get_data(\"juice\")\n exp = pycaret.classification.ClassificationExperiment()\n\n # init setup\n exp.setup(\n juice_dataframe,\n target=\"Purchase\",\n remove_multicollinearity=True,\n multicollinearity_threshold=0.95,\n log_experiment=True,\n html=False,\n session_id=123,\n n_jobs=1,\n engines={\"lr\": \"sklearnex\"},\n )\n\n #### Default Model Engine ----\n assert exp.get_engine(\"lr\") == \"sklearnex\"\n model = exp.create_model(\"lr\")\n assert isinstance(\n model, daal4py.sklearn.linear_model.logistic_path.LogisticRegression\n )",
"def resource_manager(args):\n conf = oci_config.OCIConfig(args.config, regions=args.regions, profile=args.profile)\n\n training_tools.run(conf)",
"def setup_pipeline(self, *args, **kwargs):",
"def local_osx():\n env.skos_home = '~/talks'\n env.current_skos = '~/talks/current_fast_topics.nt'\n env.tdb_store = '~/talks/tdb_store'\n env.skos_jar = '~/Documents/Projects/skos-suggester/target/skos-suggester-1.0-SNAPSHOT.jar'\n env.skos_jar_conf_file = '~/Documents/Projects/skos-suggester/src/main/resources/configuration.yml'\n env.tdbloader2_path = 'tdbloader2'\n env.run = local",
"def deploy():\n\n sync()\n\n with prefix('cd {} && source activate carnd-term1'.format(WORKDIR)):\n\n run('python train.py')\n\n fetch_models()",
"def test__gromacs_kernel(self):\n k = radical.ensemblemd.Kernel(name=\"md.gromacs\")\n k.arguments = [\"--grompp=grompp.mdp\",\"--topol=topol.top\"]\n _kernel = k._bind_to_resource(\"*\")\n assert type(_kernel) == radical.ensemblemd.kernel_plugins.md.gromacs.Kernel, _kernel\n\n # Test kernel specifics here:\n k = radical.ensemblemd.Kernel(name=\"md.gromacs\")\n k.arguments = [\"--grompp=grompp.mdp\",\"--topol=topol.top\"]\n\n k._bind_to_resource(\"*\")\n assert k._cu_def_executable == \"python\", k._cu_def_executable\n assert k.arguments == ['run.py','--mdp','grompp.mdp','--gro','start.gro','--top','topol.top','--out','out.gro'], k.arguments\n assert k._cu_def_pre_exec == [], k._cu_def_pre_exec\n assert k._cu_def_post_exec == None, k._cu_def_post_exec\n\n k._bind_to_resource(\"stampede.tacc.utexas.edu\")\n assert k._cu_def_executable == [\"python\"], k._cu_def_executable\n assert k.arguments == ['run.py','--mdp','grompp.mdp','--gro','start.gro','--top','topol.top','--out','out.gro'], k.arguments\n assert k._cu_def_pre_exec == [\"module load gromacs python mpi4py\"], k._cu_def_pre_exec\n assert k._cu_def_post_exec == None, k._cu_def_post_exec",
"def set_recipes():\n\n from .loaders.make_rsid_lookup import MakeSnpToRsid\n\n B37_SAMPLE_GENES = (\n ('2', 21224301, 21266945), # APOB\n ('19', 45409039, 45412650), # APOE\n ('17', 41196312, 41277500), # BRCA1\n ('13', 32889617, 32973809), # BRCA2\n ('10', 135340300, 135352627), # CYP2E1\n ('16', 53737875, 54148379), # FTO\n ('15', 28356183, 28567313), # HERC2\n ('1', 55505149, 55530526), # PCSK9\n ('10', 114709978, 114927437), # TCF7L2\n )\n\n B38_SAMPLE_GENES = (\n ('2', 21001429, 21044073), # APOB\n ('19', 44905796, 44909395), # APOE\n ('17', 43044295, 43125364), # BRCA1\n ('13', 32315508, 32400268), # BRCA2\n ('10', 133527363, 133539123), # CYP2E1\n ('16', 53703963, 54121941), # FTO\n ('15', 28111040, 28322179), # HERC2\n ('1', 55039548, 55064853), # PCSK9\n ('10', 112950247, 113167678), # TCF7L2\n )\n\n manager.add_recipe(\n 'snp_to_rsid',\n MakeSnpToRsid('GRCh37'),\n label='Find rsID information given chrom/pos/ref/alt',\n genome_build='GRCh37'\n )\n\n manager.add_recipe(\n 'snp_to_rsid_test',\n MakeSnpToRsid('GRCh37', sample_regions=B37_SAMPLE_GENES),\n label='Find rsID information given chrom/pos/ref/alt',\n genome_build='GRCh37'\n )\n\n manager.add_recipe(\n 'snp_to_rsid',\n MakeSnpToRsid('GRCh38'),\n label='Find rsID information given chrom/pos/ref/alt',\n genome_build='GRCh38'\n )\n\n manager.add_recipe(\n 'snp_to_rsid_test',\n MakeSnpToRsid('GRCh38', sample_regions=B38_SAMPLE_GENES),\n label='Find rsID information given chrom/pos/ref/alt',\n genome_build='GRCh38'\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a dictionary with strand orientation as values and geneIDs as keys. | def gather_strand_by_geneID_dict(genome_gtf):
    strand_by_geneID_dict = {}
    with open(genome_gtf) as f:
        for line in f:
            # GTF lines are tab-delimited: feature type is field 3,
            # strand is field 7, and the attribute string is field 9.
            current_line = line.split('\t')
            if current_line[2] == "CDS":
                # The gene/ORF identifier is taken from the third ";"-separated
                # attribute, stripping the surrounding quotes from its value.
                current_orf = current_line[8].split(';')[2].split()[1].strip('\"')
                current_strand = current_line[6]
                strand_by_geneID_dict[current_orf] = current_strand
return strand_by_geneID_dict | [
"def _get_gene_map(self) -> OrderedDict:\n if \"gene\" not in self.data:\n return OrderedDict()\n\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data[\"gene\"].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(\",\"):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes",
"def init_gene():\n gene_info = dict(\n id = '',\n name = '',\n source = '',\n strand = '',\n chr = '',\n transcripts = [],\n exons = [],\n is_alt_spliced = 0\n )\n return gene_info",
"def organize_by_chromosome(genes, transcripts):\n gene_dict = {}\n transcript_dict = {}\n\n for ID in genes:\n gene = genes[ID]\n chromosome = gene.chromosome\n if chromosome not in gene_dict:\n chrom_genes = {}\n chrom_genes[ID] = gene\n gene_dict[chromosome] = chrom_genes\n gene_dict[chromosome][ID] = gene\n\n for ID in transcripts:\n transcript = transcripts[ID]\n chromosome = transcript.chromosome\n if chromosome not in transcript_dict:\n chrom_transcripts = {}\n chrom_transcripts[ID] = transcript\n transcript_dict[chromosome] = chrom_transcripts\n transcript_dict[chromosome][ID] = transcript\n transcript_dict[chromosome][ID] = transcript\n\n return gene_dict, transcript_dict",
"def _load_orgs_and_genes(self):\n organisms = {}\n genes = {}\n for gene in self.gene_ids:\n org_file_path = self._get_organisms_file_path(gene[self.GENE_NAME_IDX], gene[self.GENE_ID_IDX])\n with open(org_file_path, \"r\") as orgs:\n org = orgs.read().splitlines()\n genes[gene[self.GENE_NAME_IDX]] = {}\n # we only care about unique organisms\n for o in org:\n if not o.startswith(\">\"):\n continue\n clean_o = o.replace(\">\", \"\", 1).replace(\"_\", \" \").title()\n # I hate to do this but there's a special case for Canis Familiaris\n # EBI does not recognize it but it does recognize Canis Lupus (Canis Lupus Familiaris)\n if \"Canis Familiaris\" in clean_o:\n clean_o = \"Canis lupus\"\n if not organisms.get(clean_o):\n organisms[clean_o] = {self.FREQ_KEY: 1, self.GENE_IDS_KEY: [gene]}\n else:\n organisms[clean_o][self.FREQ_KEY] = organisms[clean_o][self.FREQ_KEY] + 1\n organisms[clean_o][self.GENE_IDS_KEY].append(gene)\n genes[gene[self.GENE_NAME_IDX]][clean_o] = 1\n return organisms, genes",
"def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict",
"def __makeGeneCodebook():\n three_letters = (\n 'PHE', 'LEU', 'ILE', 'MET', 'VAL', 'SER', 'PRO', 'THR', 'ALA',\n 'TYR', 'HIS', 'GLN', 'ASN', 'LYS', 'ASP', 'GLU', 'CYS', 'TRP',\n 'ARG', 'GLY')\n\n one_letter = ('F', 'L', 'I', 'M', 'V', 'S', 'P', 'T', 'A', 'Y',\n 'H', 'Q', 'N', 'K', 'D', 'E', 'C', 'W', 'R', 'G')\n\n two_way = __TwoWayDict()\n for k, v in zip(three_letters, one_letter):\n two_way[k] = v\n\n return two_way",
"def genes_dict(self):\n genes = dict()\n logging.info('Building gene dictionary...')\n\n if not self.genes_uri:\n logging.warn('No BIOINDEX_GENES_URI; no gene dictionary built...')\n return genes\n\n # if a local file, just use the path, otherwise the entire uri\n uri = urllib.parse.urlparse(self.genes_uri)\n if not uri.scheme or uri.scheme == 'file':\n uri = uri.path\n\n # open the file, which may be remote\n for chromosome, source, typ, start, end, attributes in read_gff(uri):\n region = RegionLocus(chromosome, start, end)\n symbol = attributes.get('ID') or attributes.get('Name')\n alias = attributes.get('Alias')\n\n # add to the gene dictionary\n if symbol:\n genes[symbol.upper()] = region\n\n # add any aliases as well\n if alias:\n for symbol in alias.split(','):\n genes[symbol.strip().upper()] = region\n\n return genes",
"def makeRegions(tid_exons):\n tid_regions = {}\n for tid in tid_exons:\n exons = sorted(tid_exons[tid], key=lambda x: x.start)\n start = exons[0].start\n end = exons[0].end\n strand = exons[0].strand\n regions = []\n for idx, exon in enumerate(exons):\n if exon.start <= end:\n end = max(end, exon.end)\n else:\n regions.append((start, end))\n start = exon.start\n end = exon.end\n if len(regions) == 0 or regions[-1] != (start, end):\n regions.append((start, end))\n tid_regions[tid] = regions\n return tid_regions",
"def init_gene():\n gene_details=dict(\n id = '', \n anno_id = [],\n confgenes_id = [],\n name = '',\n source = '',\n gene_info = {},\n alias = '',\n name2 = [],\n strand = '',\n chr = '',\n chr_num = [],\n paralogs = [],\n start = '',\n stop = '',\n transcripts = [],\n transcript_info = [],\n transcript_status = [],\n transcript_valid = [],\n exons = [],\n exons_confirmed = [],\n cds_exons = [],\n utr5_exons = [],\n utr3_exons = [],\n tis = [],\n tis_conf = [],\n tis_info = [],\n cdsStop = [],\n cdsStop_conf = [],\n cdsStop_info = [],\n tss = [],\n tss_info = [],\n tss_conf = [],\n cleave = [],\n cleave_info = [],\n cleave_conf = [],\n polya = [],\n polya_info = [],\n polya_conf = [],\n is_alt = [],\n is_alt_spliced = 0,\n is_valid = [],\n transcript_complete = [],\n is_complete = [],\n is_correctly_gff3_referenced = '',\n splicegraph = []\n )\n return gene_details",
"def create_species_encode():\n\tdata = pd.read_csv(\"../train.csv\")\n\tspecies = sorted(data.species.unique())\n\tspecies_dict = {species: index for index, species in enumerate(species)}\n\treturn species_dict",
"def get_gene_transcript_map(db_path, table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col).reset_index()\n r = {}\n for gene_id, s in df.groupby('GeneId'):\n r[gene_id] = s.TranscriptId.tolist()\n return r",
"def region_ids(self):\n\n # collect data\n abbrevs = self.abbrevs\n names = self.names\n numbers = self.numbers\n # combine data and make a mapping\n all_comb = zip(numbers + abbrevs + names, (numbers * 3))\n region_ids = {key: value for key, value in all_comb}\n return region_ids",
"def get_transcript_gene_map(db_path, table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col)\n return dict(list(zip(df.index, df.GeneId)))",
"def genotype_to_dict(all_genotypes):\n\n rows = {}\n\n for rs, individuals in all_genotypes.items():\n rows.setdefault(rs, {})\n for individual, genotypes in individuals.items():\n rows[rs].setdefault(individual, \"\")\n for genotype in genotypes:\n rows[rs][individual] =\\\n genotype[\"genotype\"].replace(\"|\", \"\")\n break\n\n return rows",
"def name_genes_entrez(gene_names: list, key_entrez: bool, organism: int = ORGANISM) -> dict:\n entrez_names = dict()\n matcher = GeneMatcher(organism)\n matcher.genes = gene_names\n for gene in matcher.genes:\n name = gene.input_identifier\n entrez = gene.gene_id\n if entrez is not None:\n if key_entrez:\n entrez_names[entrez] = name\n else:\n entrez_names[name] = entrez\n return entrez_names",
"def build_seq_dict(gene_regions, assembly):\n gene_sequences = dict()\n for gene in gene_regions:\n chrom, start, end, name, exCt, strand = gene_regions[gene]\n seq = call_twoBitToFa(chrom, start, end, assembly)\n gene_sequences[gene] = seq\n return gene_sequences",
"def _stable_ids_from_ambig(ambig_map):\n # {qiimeid: stableid}\n ambig_assoc = {}\n\n # {rid: sampleid}\n ri = {}\n\n for k, v in ambig_map.items():\n for unambig in v:\n tag, untagged = unambig.split('_', 1)\n stab = \"%s.%s\" % (untagged, tag)\n ambig_assoc[stab] = k\n ri[unambig] = stab\n\n return ambig_assoc, ri",
"def get_text_mining_mir_dictionary():\n if logger.getEffectiveLevel() == logging.DEBUG or not os.path.exists(OUT_MIR_ALIAS_FILE):\n __create_mir_alias_dictionary__()\n\n mir_alias_to_identifier = {}\n with gzip.open(OUT_MIR_ALIAS_FILE, 'rb') as mir_alias_file:\n for line in mir_alias_file:\n tax_id, mir_id, mir_alias = line.rstrip('\\r\\n').split('\\t')\n mir_alias_to_identifier[(tax_id, mir_alias)] = mir_id\n return mir_alias_to_identifier",
"def get_gene_id_dict(list_of_results):\n dict1 = {}\n for i, dict2 in enumerate(list_of_results):\n key = dict2[\"GeneID\"]\n if key in dict1.keys():\n # list1 = dict1[key]\n # list1.append(list_of_results[i])\n # dict1[key] = list1\n # list1.append(list_of_results[i])\n dict1[key].append(list_of_results[i])\n else:\n dict1[key] = [list_of_results[i]]\n return dict1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
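A minimal usage sketch for the gather_strand_by_geneID_dict entry above, assuming that function is already in scope; the two tab-delimited GTF records (and their attribute layout, where the third attribute carries the ORF id) are invented for illustration:

import tempfile

# Two hypothetical GTF records; only the CDS line contributes to the result.
gtf_lines = [
    'chrI\tsgd\tCDS\t335\t649\t.\t+\t0\tgene_id "YAL069W"; transcript_id "YAL069W"; orf_id "YAL069W";\n',
    'chrI\tsgd\texon\t335\t649\t.\t+\t.\tgene_id "YAL069W"; transcript_id "YAL069W"; orf_id "YAL069W";\n',
]

with tempfile.NamedTemporaryFile("w", suffix=".gtf", delete=False) as tmp:
    tmp.writelines(gtf_lines)
    gtf_path = tmp.name

strands = gather_strand_by_geneID_dict(gtf_path)  # assumes the function above is in scope
print(strands)  # expected: {'YAL069W': '+'}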
Determine relevant entries in crkeng.xml and build a smaller xml file for testing. | def build_test_xml():
crkeng_file_path = find_latest_xml_file(shared_res_dir / "dictionaries")
print(f"Building test dictionary files using {crkeng_file_path.name}")
crkeng_root = ET.parse(str(crkeng_file_path)).getroot()
# relevant entries in crkeng.xml file we want to determine
relevant_xml_ls: Set[str] = set()
xml_ls: Set[str] = set()
crkeng_entries = crkeng_root.findall(".//e")
for element in crkeng_entries:
xml_l = extract_l_str(element)
xml_ls.add(xml_l)
test_words = get_test_words()
print(f"Analyzing xml l elements and test words")
word_to_analyses = morphodict.analysis.relaxed_analyzer().bulk_lookup(
xml_ls | test_words
)
print("Analysis done")
test_word_lemmas: Set[str] = set()
for test_word in test_words:
for analysis in word_to_analyses[test_word]:
lemma = fst_analysis_parser.extract_lemma(analysis)
if lemma is None:
logger.warn(
"Skipping test word: %s. "
"Could not extract lemma from its analysis: %s",
test_word,
analysis,
)
continue
test_word_lemmas.add(lemma)
for xml_l in tqdm(xml_ls, desc="screening relevant entries in crkeng.xml"):
if xml_l in test_words:
relevant_xml_ls.add(xml_l)
continue
for xml_l_analysis in word_to_analyses[xml_l]:
xml_lemma = partition_analysis(xml_l_analysis)[1]
for test_word_lemma in test_word_lemmas:
if test_word_lemma == xml_lemma:
relevant_xml_ls.add(xml_l)
break
relevant_crkeng_entries = []
for element in crkeng_entries:
xml_l = extract_l_str(element)
if xml_l in relevant_xml_ls:
relevant_crkeng_entries.append(element)
crkeng_xml_utils.write_xml_from_elements(
list(crkeng_root.findall(".//source")) + relevant_crkeng_entries,
shared_res_dir / "test_dictionaries" / "crkeng.xml",
) | [
"def XML_EC_PL(Name, InputsFile, OutputFile, emin,emax):\n\n\t#On commence par afficher ce qu'on fait\r\n\tprint \" Build xml file \"\r\n\r\tprint InputsFile\n\t#ouverture du fichier dans lequel on place le source model\n\ttry:\n\t\tfresult = open(OutputFile, 'w')\n\texcept:\n\t\tprint \"Coucou\"\r\n \t#ecriture des premieres lignes invariantes\n\tfresult.write('<?xml version=\"1.0\" ?>')\r\n\tfresult.write(\"<source_library title=\\\"source library\\\">\\n\")\n\r\n \t#ouverture du fichier avec les entrees\r\n\tf = open(InputsFile,\"r\")\r\n\tlines = f.readlines()\r\n\t\r\n \t#Ajout des sources detectees dans le catalogue\n\t#Pour chaque ligne du fichier d'entree\r\n\tfor line in range(len(lines)):\n\t\t#Lire les donnees de la ligne\t\t\r\n\t\tdata = lines[line].split()\r\n\t\tname = data[0]\n\n\t\t#Verification : est on en train de traiter la source que l'on veut etudier ou une autre ?\r\n\t\tif str(name) == Name :\r\n\t\t\tmysource = 1\r\n\t\telse:\r\n\t\t\tmysource = 0\n\n\t\t#recuperation des donnees\r\n\t\tRA = data[1]\r\n\t\tDEC = data[2]\r\n\t\tIntegral = float(data[3])*float(Frac)\r\n\t\tGamma= data[4]\n\n\t\t\r\n\t\ttry:\n\t\t\t#essai de definition des donnees pour un PL avec ExpCut\n\t\t\tPrefactor = float(data[5])*float(Frac)\r\n\t\t\tEnergy = float(data[6])\r\n\t#\t\tPrefactor = Prefactor/pow(Energy/100., float(Gamma)) #Densite de flux calculee a Epivot\r\n\t#\t\tPrefactor = Prefactor*pow(1000./100., float(Gamma)) #We do the calculation with (E/1000.)^Gamma\n\t\t\tvariabilite=float(data[8])\n\n#\t\t\tprint variabilite\n\n\n\n\r\n\t\t\tcut = float(data[7]) # Cut est la variable qui nous permettra de savoir si il faut utiliser un cut off (1) ou une loi de puissance normale (2)\r\n\t\texcept:\r\n\t\t\ttry:\r\n\t\t\t\tcut = float(data[5])\r\n\t\t\texcept:\r\n\t\t\t\tprint \" Wrong size of list \"\r\n\t\t\t\tsys.exit()\r\n \t#Si on considere un ccut off exponentiel pour la source :\r\n\t\tif cut == 1:\n\t\t\t#ecriture du nom de la source consideree\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\r\n\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\r\n\t\t\tspectrum_type = \"PLSuperExpCutoff\"\n\t\t\t#Utilisation de la modelisation PLSuperExpCutoff car plus simple et plus intuitive pour nous et pour la modelisation des pulsars si il faut en modeliser\n\r\n\t\t\t#definition des parametres spectraux a prendre en comtpe et de la chaine de caractere a integrer\r\n\n\n\n\t\t\tif variabilite==0.0 or variabilite==2.0:\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"10000000.0\\\" min=\\\"0.0000001\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\r\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.001\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\n\r\n\t\t\t\tspectrum_lines += \" 
<parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\t\t\telif variabilite==1.0 :\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10000000.0\\\" min=\\\"0.0\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.0001\\\"\"\r\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\n\r\n \r\n\n# <spectrum type=\"PLSuperExpCutoff\">\n# <parameter free=\"1\" max=\"100000\" min=\"0\" name=\"Prefactor\" scale=\"1e-10\" value=\"Prefactor*1e-10\"/>\n# <parameter free=\"1\" max=\"0\" min=\"5\" name=\"Index1\" scale=\"-1\" value=\"valeur du catalogue\"/>\n# <parameter free=\"0\" max=\"20000\" min=\"1.0\" name=\"Scale\" scale=\"1\" value=\"Epivot\"/>\n# <parameter free=\"1\" max=\"300000\" min=\"100\" name=\"Cutoff\" scale=\"1\" value=\"3000\"/>\n# <parameter free=\"0\" max=\"5\" min=\"0\" name=\"Index2\" scale=\"1\" value=\"1.5\"/>\n# </spectrum>\n\n\r\n\t\telse:\n\t\t#Sinon (si on considere une loi de puissance simple)\n\t\t#definition de la chaine de caractere comportant le nom de la source\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\n\t\t\tif mysource == 0:\r\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\n\t\t\telse:\n\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\t\t\t\t\n\n\t\t\t#definition de la chaine de caractere correspondant a la forme de fit que l'on souhaite utiliser (Loi de puissance)\r\n\t\t\tspectrum_type = \"PowerLaw2\"\r\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre Integrale\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\telse:\n\t\t\t#sinon on le libere\r\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\n\n\n\n\n\n\t\t\t#Toujours ce facteur....\r\n\t\t\tIntegral = float(Integral)*1e10\r\n\t\t\tscale = 1e-10\n\n\n\t\n\r\n\t\t\tspectrum_lines += \" name=\\\"Integral\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre gamma\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\telse:\n\t\t\t\t#si c'est pas la source que l'on etudie on le laisse libre\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\n\n\t\t\t#fin 
de la chaine de parametres sur le modele spectral\r\n\t\t\tspectrum_lines += \" name=\\\"Index\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t \n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"1000.0\\\"/>\\n\"\r\n \r\n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\t\t\telse:\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"100\\\"/>\\n\"\n\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"100000.0\\\" Min =\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\n \t\t#ajout du modele spectral a la liste de parametres \r\n\t\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\t\tresult_line += spectrum_lines\r\n\t\tresult_line += \" </spectrum>\\n\"\n\n\t\t\n\n\t\tif mysource==0 and variabilite!=1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telif mysource==0 and variabilite==1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telse:\n #ajout du modele spatial a la liste de parametres \n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\t\t\n\t\tresult_line += \" </source>\\n\"\r\n\t\tfresult.write(result_line+\"\\n\")\r\n #Ajout du fond diffus galactique\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"gal_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"ConstantValue\"\r\n\r\n\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Value\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += 
spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n\r\n\tresult_line += \" <spatialModel file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/gll_iem_v02.fit\\\" type=\\\"MapCubeFunction\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"1000.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Normalization\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\r\n \t#Ajout du fond diffus extragalactique\r\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"eg_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"FileFunction\"\r\n\r\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Normalization\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/isotropic_iem_v02.txt\\\" type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n \r\n\tresult_line += \" <spatialModel type=\\\"ConstantValue\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"100.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Value\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\n \t#Fermeture des fichiers \r\n\tf.close() \r\n\tfresult.write(\"\\n</source_library>\\n\")\r\n\tfresult.close()\r\n\treturn",
"def buildxml(self):\n # assume self._objslock is already held here\n logger.info(\"Emane.buildxml()\")\n self.buildplatformxml()\n self.buildnemxml()\n self.buildtransportxml()\n self.buildeventservicexml()",
"def test_01_Xml(self):\n self.assertEqual(self.m_xml.root.tag, 'PyHouse', 'Invalid XML - not a PyHouse XML config file')\n self.assertEqual(self.m_xml.computer_div.tag, 'ComputerDivision')\n self.assertEqual(self.m_xml.communication_sect.tag, 'CommunicationSection')\n self.assertEqual(self.m_xml.email_sect.tag, 'EmailSection')\n self.assertEqual(self.m_xml.twitter_sect.tag, 'TwitterSection')\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.APIs, 'Pyhouse', 120))",
"def test_01_FindXml(self):\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n # sprint(PrettyFormatAny.form(self.m_root_xml, 'A3-01-A - Entire Xml'))\n self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection', 'XML - No Controllers section')\n # print(PrettyFormatAny.form(self.m_xml.controller_sect, 'A3-01-B - All Controllers Xml'))\n self.assertEqual(self.m_xml.controller.tag, 'Controller', 'XML - No Controller section')\n # print(PrettyFormatAny.form(self.m_xml.controller, 'A3-01-C - First Controller Xml'))",
"def __human_readable_parse(self):\n\n xml_file = open(self.__xml_path).read() #open de xml\n\n #Adding four whitespace by level of deepness\n xml_file = re.sub('>\\s*<cases>', '>\\n <cases>',xml_file)\n xml_file = re.sub(r'>\\s*</cases>', '>\\n </cases>',xml_file)\n xml_file = re.sub(r'>\\s*<group', '>\\n <group',xml_file)\n xml_file = re.sub(r'>\\s*</group>', '>\\n </group>',xml_file)\n xml_file = re.sub(r'>\\s*<case ', '>\\n <case ',xml_file)\n xml_file = re.sub(r'>\\s*</case>', '>\\n </case>',xml_file)\n xml_file = re.sub(r'>\\s*<susp_snippet', '>\\n <susp_snippet',xml_file)\n xml_file = re.sub(r'>\\s*<src_snippet', '>\\n <src_snippet',xml_file)\n xml_file = re.sub(r'>\\s*<annotation', '>\\n <annotation',xml_file)\n xml_file = re.sub(r'>\\s*</annotation>', '>\\n </annotation>',xml_file)\n xml_file = re.sub(r'>\\s*<phenomenon', '>\\n <phenomenon',xml_file)\n xml_file = re.sub(r'>\\s*<susp_chunk', '>\\n <susp_chunk',xml_file)\n xml_file = re.sub(r'>\\s*<src_chunk', '>\\n <src_chunk',xml_file)\n\n new_xml_file = open(self.__xml_path,'w')\n new_xml_file.write(xml_file)\n new_xml_file.close()",
"def test_01_FindXml(self):\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n self.assertEqual(self.m_xml.thermostat_sect.tag, 'ThermostatSection', 'XML - No Thermostat section')\n self.assertEqual(self.m_xml.thermostat.tag, 'Thermostat', 'XML - No Thermostat Entry')\n self.assertEqual(self.m_xml.light_sect.tag, 'LightSection', 'XML - No Light section')\n self.assertEqual(self.m_xml.light.tag, 'Light', 'XML - No Light Entry')",
"def _populate_from_xml_file(self, xml):\n '''\n example from API: http://www.ga.gov.au/www/argus.argus_api.survey?pSurveyNo=921\n\n <?xml version=\"1.0\" ?>\n <ROWSET>\n <ROW>\n <SURVEYID>921</SURVEYID>\n <SURVEYNAME>Goomalling, WA, 1996</SURVEYNAME>\n <STATE>WA</STATE>\n <OPERATOR>Stockdale Prospecting Ltd.</OPERATOR>\n <CONTRACTOR>Kevron Geophysics Pty Ltd</CONTRACTOR>\n <PROCESSOR>Kevron Geophysics Pty Ltd</PROCESSOR>\n <SURVEY_TYPE>Detailed</SURVEY_TYPE>\n <DATATYPES>MAG,RAL,ELE</DATATYPES>\n <VESSEL>Aero Commander</VESSEL>\n <VESSEL_TYPE>Plane</VESSEL_TYPE>\n <RELEASEDATE/>\n <ONSHORE_OFFSHORE>Onshore</ONSHORE_OFFSHORE>\n <STARTDATE>05-DEC-96</STARTDATE>\n <ENDDATE>22-DEC-96</ENDDATE>\n <WLONG>116.366662</WLONG>\n <ELONG>117.749996</ELONG>\n <SLAT>-31.483336</SLAT>\n <NLAT>-30.566668</NLAT>\n <LINE_KM>35665</LINE_KM>\n <TOTAL_KM/>\n <LINE_SPACING>250</LINE_SPACING>\n <LINE_DIRECTION>180</LINE_DIRECTION>\n <TIE_SPACING/>\n <SQUARE_KM/>\n <CRYSTAL_VOLUME>33.6</CRYSTAL_VOLUME>\n <UP_CRYSTAL_VOLUME>4.2</UP_CRYSTAL_VOLUME>\n <DIGITAL_DATA>MAG,RAL,ELE</DIGITAL_DATA>\n <GEODETIC_DATUM>WGS84</GEODETIC_DATUM>\n <ASL/>\n <AGL>60</AGL>\n <MAG_INSTRUMENT>Scintrex CS2</MAG_INSTRUMENT>\n <RAD_INSTRUMENT>Exploranium GR820</RAD_INSTRUMENT>\n </ROW>\n </ROWSET>\n '''\n # turn the XML doc into a Python object\n root = objectify.fromstring(xml)\n\n if hasattr(root.ROW, 'SURVEYNAME'):\n self.survey_name = root.ROW.SURVEYNAME\n if hasattr(root.ROW, 'STATE'):\n self.state = root.ROW.STATE\n if hasattr(root.ROW, 'OPERATOR'):\n self.operator = root.ROW.OPERATOR\n if hasattr(root.ROW, 'CONTRACTOR'):\n self.contractor = root.ROW.CONTRACTOR\n if hasattr(root.ROW, 'PROCESSOR'):\n self.processor = root.ROW.PROCESSOR\n if hasattr(root.ROW, 'SURVEY_TYPE'):\n self.survey_type = root.ROW.SURVEY_TYPE\n if hasattr(root.ROW, 'DATATYPES'):\n self.data_types = root.ROW.DATATYPES\n if hasattr(root.ROW, 'VESSEL'):\n self.vessel = root.ROW.VESSEL\n if hasattr(root.ROW, 'VESSEL_TYPE'):\n self.vessel_type = root.ROW.VESSEL_TYPE\n if hasattr(root.ROW, 'RELEASEDATE'):\n self.release_date = datetime.strptime(root.ROW.RELEASEDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.RELEASEDATE.text is not None else None\n if hasattr(root.ROW, 'ONSHORE_OFFSHORE'):\n self.onshore_offshore = root.ROW.ONSHORE_OFFSHORE\n if hasattr(root.ROW, 'STARTDATE'):\n self.start_date = datetime.strptime(root.ROW.STARTDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.STARTDATE.text is not None else None\n if hasattr(root.ROW, 'ENDDATE'):\n self.end_date = datetime.strptime(root.ROW.ENDDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.ENDDATE.text is not None else None\n if hasattr(root.ROW, 'WLONG'):\n self.w_long = root.ROW.WLONG\n if hasattr(root.ROW, 'ELONG'):\n self.e_long = root.ROW.ELONG\n if hasattr(root.ROW, 'SLAT'):\n self.s_lat = root.ROW.SLAT\n if hasattr(root.ROW, 'NLAT'):\n self.n_lat = root.ROW.NLAT\n if hasattr(root.ROW, 'LINE_KM'):\n self.line_km = root.ROW.LINE_KM\n if hasattr(root.ROW, 'TOTAL_KM'):\n self.total_km = root.ROW.TOTAL_KM\n if hasattr(root.ROW, 'LINE_SPACING'):\n self.line_spacing = root.ROW.LINE_SPACING\n if hasattr(root.ROW, 'LINE_DIRECTION'):\n self.line_direction = root.ROW.LINE_DIRECTION\n if hasattr(root.ROW, 'TIE_SPACING'):\n self.tie_spacing = root.ROW.TIE_SPACING\n if hasattr(root.ROW, 'SQUARE_KM'):\n self.square_km = root.ROW.SQUARE_KM\n if hasattr(root.ROW, 'CRYSTAL_VOLUME'):\n self.crystal_volume = root.ROW.CRYSTAL_VOLUME\n if hasattr(root.ROW, 'UP_CRYSTAL_VOLUME'):\n self.up_crystal_volume = root.ROW.UP_CRYSTAL_VOLUME\n if 
hasattr(root.ROW, 'DIGITAL_DATA'):\n self.digital_data = root.ROW.DIGITAL_DATA\n if hasattr(root.ROW, 'GEODETIC_DATUM'):\n self.geodetic_datum = root.ROW.GEODETIC_DATUM\n if hasattr(root.ROW, 'ASL'):\n self.asl = root.ROW.ASL\n if hasattr(root.ROW, 'AGL'):\n self.agl = root.ROW.AGL\n if hasattr(root.ROW, 'MAG_INSTRUMENT'):\n self.mag_instrument = root.ROW.MAG_INSTRUMENT\n if hasattr(root.ROW, 'RAD_INSTRUMENT'):\n self.rad_instrument = root.ROW.RAD_INSTRUMENT",
"def create_xml_description(path, file=\"251.xml\", verbose=False):\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <WORKFLOW_TEMPLATE TITLE=\"default search\" UUID=\"f499a2d3-22f0-4ab6-b0d9-0999d01e543f\" WORKFLOW_TEMPLATE_ID=\"_13927376466640_5646062711616823\">\n <PROTEINLYNX_QUERY TYPE=\"Databank-search\">\n <DATABANK_SEARCH_QUERY_PARAMETERS>\n <SEARCH_ENGINE_TYPE VALUE=\"PLGS\"/>\n <SEARCH_DATABASE NAME=\"UNIPROT\"/>\n <SEARCH_TYPE NAME=\"Electrospray-Shotgun\"/>\n <IA_PARAMS>\n <FASTA_FORMAT VALUE=\"DEF\"/>\n <PRECURSOR_MHP_WINDOW_PPM VALUE=\"-1\"/>\n <PRODUCT_MHP_WINDOW_PPM VALUE=\"-1\"/>\n <NUM_BY_MATCH_FOR_PEPTIDE_MINIMUM VALUE=\"2\"/>\n <NUM_PEPTIDE_FOR_PROTEIN_MINIMUM VALUE=\"1\"/>\n <NUM_BY_MATCH_FOR_PROTEIN_MINIMUM VALUE=\"5\"/>\n <PROTEIN_MASS_MAXIMUM_AMU VALUE=\"2500000\"/>\n <FALSE_POSITIVE_RATE VALUE=\"1\"/>\n <AQ_PROTEIN_ACCESSION VALUE=\"\"/>\n <AQ_PROTEIN_MOLES VALUE=\"-1\"/>\n <MANUAL_RESPONSE_FACTOR VALUE=\"1000\"/>\n <DIGESTS>\n <ANALYSIS_DIGESTOR MISSED_CLEAVAGES=\"2\">\n <AMINO_ACID_SEQUENCE_DIGESTOR NAME=\"Trypsin\" UUID=\"50466de0-ff04-4be2-a02f-6ccc7b5fd1f5\">\n <CLEAVES_AT AMINO_ACID=\"K\" POSITION=\"C-TERM\">\n <EXCLUDES AMINO_ACID=\"P\" POSITION=\"N-TERM\"/>\n </CLEAVES_AT>\n <CLEAVES_AT AMINO_ACID=\"R\" POSITION=\"C-TERM\">\n <EXCLUDES AMINO_ACID=\"P\" POSITION=\"N-TERM\"/>\n </CLEAVES_AT>\n </AMINO_ACID_SEQUENCE_DIGESTOR>\n </ANALYSIS_DIGESTOR>\n </DIGESTS>\n <MODIFICATIONS>\n <ANALYSIS_MODIFIER STATUS=\"FIXED\">\n <MODIFIER MCAT_REAGENT=\"No\" NAME=\"Carbamidomethyl+C\">\n <MODIFIES APPLIES_TO=\"C\" DELTA_MASS=\"57.0215\" TYPE=\"SIDECHAIN\"/>\n </MODIFIER>\n </ANALYSIS_MODIFIER>\n <ANALYSIS_MODIFIER ENRICHED=\"FALSE\" STATUS=\"VARIABLE\">\n <MODIFIER MCAT_REAGENT=\"No\" NAME=\"Oxidation+M\">\n <MODIFIES APPLIES_TO=\"M\" DELTA_MASS=\"15.9949\" TYPE=\"SIDECHAIN\"/>\n </MODIFIER>\n </ANALYSIS_MODIFIER>\n </MODIFICATIONS>\n </IA_PARAMS>\n </DATABANK_SEARCH_QUERY_PARAMETERS>\n </PROTEINLYNX_QUERY>\n </WORKFLOW_TEMPLATE>\"\"\"\n with open(join(path, file), \"w\") as h:\n h.write(xml)\n if verbose:\n print(\"Created the {} file.\".format(file))",
"def runXML(self, xml, host):\n\n\t\tlist = []\n\t\tself.generator.parse(xml)\n\t\tif self.owner.section == 'all':\n\t\t\tfor section in [\n\t\t\t\t'order',\n\t\t\t\t'debug',\n\t\t\t\t'main',\n\t\t\t\t'packages',\n\t\t\t\t'pre',\n\t\t\t\t'post',\n\t\t\t\t'boot',\n\t\t\t\t'installclass'\n\t\t\t\t]:\n\t\t\t\tlist += self.generator.generate(section,\n\t\t\t\t\tannotation=self.owner.annotation)\n\t\telse:\n\t\t\tlist += self.generator.generate(self.owner.section,\n\t\t\t\t\tannotation=self.owner.annotation)\n\t\tself.owner.addOutput(host,\n\t\t\tself.owner.annotate('<profile lang=\"kickstart\">'))\n\t\tself.owner.addOutput(host,\n\t\t\tself.owner.annotate('<section name=\"kickstart\">'))\n\t\tself.owner.addOutput(host, self.owner.annotate('<![CDATA['))\n\t\tfor i in list:\n\t\t\tself.owner.addOutput(host, i)\n\t\tself.owner.addOutput(host, self.owner.annotate(']]>'))\n\t\tself.owner.addOutput(host, self.owner.annotate('</section>'))\n\t\tself.owner.addOutput(host, self.owner.annotate('</profile>'))",
"def test_pep8_conformance_pygccxml(self):\n\n print(\"\\r\\n\")\n\n # Get the path to current directory\n path = os.path.dirname(os.path.realpath(__file__))\n path += \"/../pygccxml/\"\n\n self.run_check(path)",
"def test_02_XML(self):\n pass",
"def generate_xml(data):\n if VERBOSE: print \"Generating XML...\"\n \n # Top element\n results = ET.Element('results')\n results.set(\"gold\", data['goldfile'])\n results.set(\"system\", data['sysfile'])\n\n # Types\n types = ET.SubElement(results, 'types')\n types.text = data['types']\n\n # Regime\n regime = ET.SubElement(results, 'regime')\n regime.text = data['regime']\n\n # Counts\n counts = ET.SubElement(results, 'counts')\n\n for k,v in data['counts'].iteritems():\n element = ET.SubElement(counts, k)\n element.text = str(v)\n\n # Scores\n scores = ET.SubElement(results, 'scores')\n\n for t in data['scores']:\n score_type = ET.SubElement(scores, t)\n\n for m,v in data['scores'][t].iteritems():\n measure = ET.SubElement(score_type, m)\n measure.text = str(v)\n\n # Gold edits\n goldedits = ET.SubElement(results, 'goldedits')\n\n for edit in data['goldedits']:\n element = ET.SubElement(goldedits, 'edit', edit)\n\n # Spurious edits\n spuriousedits = ET.SubElement(results, 'spuriousedits')\n\n for edit in data['spuriousedits']:\n element = ET.SubElement(spuriousedits, 'edit', edit)\n\n return results",
"def generate_tallies_xml(self,filename_string,default_group_struc=False):\n #reads a tallies.xml file with a reaction rate tally and uses the \n #material filter to build a new tallies.xml with hybrid tallies using\n #the energy filter. \n\n tree = ET.parse(filename_string)\n root = tree.getroot()\n mat_filter = root[0][0].text\n energy_filter = self.gen_energy_struc(default_group_struc)\n\n new_data = ET.Element('tallies')\n\n #attributes for hybrid tally.\n attrib_mat = {'id':'1', 'type':'material'}\n attrib_energy = {'id':'2', 'type':'energy'}\n empty_attrib = {}\n\n filter_mat = ET.SubElement(new_data,'filter',attrib_mat)\n filter_ene = ET.SubElement(new_data,'filter',attrib_energy)\n ET.SubElement(filter_mat,'bins',empty_attrib)\n ET.SubElement(filter_ene,'bins',empty_attrib)\n \n new_data[0][0].text = mat_filter\n new_data[1][0].text = ' '.join(str(i) for i in energy_filter)\n\n #writing '<tally>' object\n for key,value in enumerate(self.tallies_dict.items()):\n tally_id = value[1]['id']\n tally_name = value[1]['name']\n attrib_tallies = {'id':tally_id,'name':tally_name}\n tally_level = ET.SubElement(new_data,'tally',attrib_tallies)\n\n ET.SubElement(tally_level,'filters',empty_attrib)\n \n if (value[1]['nuclides'] != ''):\n ET.SubElement(tally_level,'nuclides',empty_attrib)\n ET.SubElement(tally_level,'scores',empty_attrib)\n #last_index could be either a 1 or a 2, depending on\n #whether we are doing a reaction rate tally (i.e, last_index=1) or\n #a flux tally (i.e., last_index = 2)\n last_index = 1\n filter_list = value[1]['filter']\n score_list = value[1]['scores']\n #offset by '2' indicates we are writing the tally objects below\n #the material and energy filters\n new_data[key+2][0].text = ' '.join(i for i in filter_list)\n \n if (value[1]['nuclides'] != ''):\n nuclide_list = value[1]['nuclides']\n new_data[key+2][1].text = ' '.join(nuc for nuc in nuclide_list)\n last_index = 2\n new_data[key+2][last_index].text = ' '.join(i for i in score_list)\n\n if (old is True):\n clean_xml_indentation(new_data)\n else:\n clean_indentation(new_data)\n #renaming original 'tallies.xml' to 'tallies-original.xml'\n #to distinguish it from the new hybrid 'tally.xml' file.\n tree = ET.ElementTree(new_data)\n #os.rename(filename_string,'tallies-original.xml')\n tree.write('./tallies-hybrid-tally.xml', xml_declaration=True,\n encoding='utf-8', method=\"xml\")",
"def generate_from_xml(self, filename):\n\n pass",
"def RewriteComponentXml(file_name, engines_xml):\n output = StringIO.StringIO()\n for line in fileinput.input(file_name):\n if re.search(r'<engines exec=', line):\n output.write(engines_xml)\n else:\n output.write(line)\n file(file_name, 'w').write(output.getvalue())",
"def build(filename=\"JMdict_e.gz\", output_filename=DATABASE_FILENAME):\n # NOTE: The JMdict XML file contains XML entities, that are expanded when\n # parsed using Python's stdlib xml.etree.ElementTree like so:\n # ElementTree.parse(f). That is undesired behavior for our use-case. Oshi\n # needs to parse the short entity string, for example &adj-i; should be\n # \"adj-i\" instead of \"adjective (keiyoushi)\". That's why it uses an external\n # xml parser: lxml that allows you to specify whether to expand entites.\n extension = path.splitext(filename)[1].lower()\n parser = etree.XMLParser(resolve_entities=False)\n if extension == \".gz\":\n with gzip.open(filename) as f:\n tree = etree.parse(f, parser)\n elif extension == \".xml\":\n tree = etree.parse(filename, parser)\n else:\n raise ValueError(\"File extension not supported: \" + extension)\n\n entries = []\n # variables starting with x contain xml element(s)\n for xentry in tree.getroot():\n entry = {}\n entry[\"writings\"] = [x.find('keb').text for x in xentry.findall('k_ele')]\n entry[\"readings\"] = [x.find('reb').text for x in xentry.findall('r_ele')]\n xsenses = xentry.findall('sense')\n senses = []\n # last_tags will contain a reference to previously found tags (JMdict\n # specifies that when pos is empty, the previous one should be used)\n last_tags = []\n for xsense in xsenses:\n tags = []\n xtags = xsense.findall('pos') # + xsense.findall('misc')\n for xtag in xtags:\n match = re.search(r'&([\\w-]+?);', etree.tostring(xtag, encoding=\"utf-8\").decode('utf-8') or \"\")\n if match: tags.append(match.group(1))\n glosses = [x.text for x in xsense.findall('gloss')]\n senses.append({\"glosses\": glosses, \"tags\": tags or last_tags})\n last_tags = tags or last_tags\n entry[\"senses\"] = senses\n entries.append(entry)\n with open(output_filename, 'w', encoding='utf-8') as f:\n json.dump(entries, f, ensure_ascii=False)",
"def test_write(self):\n cases = {\n self.test_eac + \"NE00401.xml\": True,\n self.test_eac + \"NE01501.xml\": False,\n self.test_eac + \"NE01302.xml\": True,\n }\n metadata_url = 'http://www.example.com/metadata.xml'\n presentation_url = 'http://www.example.com/presentation.html'\n for case in cases:\n doc = EacCpf.EacCpf(case, metadata_url, presentation_url)\n self.assertNotEqual(doc, None)\n path = doc.write(self.temp)\n self.assertEquals(os.path.exists(path), True)\n # read the file and try to extract the attributes\n try:\n tree = etree.parse(path)\n ns = {\n EacCpf.DOC_KEY: EacCpf.DOC_NS,\n EacCpf.ESRC_KEY: EacCpf.ESRC_NS,\n }\n # get the url to the metadata file\n metadata = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":metadata\", namespaces=ns)\n self.assertNotEqual(metadata, None)\n self.assertEqual(metadata[0], metadata_url)\n # get the url to the presentation file\n presentation = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":presentation\", namespaces=ns)\n self.assertNotEqual(presentation, None)\n self.assertEqual(presentation[0], presentation_url)\n # get the url to the source file\n source = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":source\", namespaces=ns)\n self.assertNotEqual(source, None)\n self.assertEqual(source[0], case)\n except:\n msg = \"Failed to complete parsing of {0}\".format(case)\n self.log.error(msg, exc_info=True)\n self.fail(msg)",
"def get_document_xml():",
"def buildXMLTree(self): \n self.logProgress(stack()[0][3])\n\n def indent(elem, level=0):\n# self.logProgress(stack()[0][3])\n\n i = \"\\n\" + level * \" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n indent(elem, level + 1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n # create the root element\n root = Element('lccSchema')\n tree = ElementTree(root)\n\n # add attributes to root for XSD validation of XML document \n rootAttribDict = root.attrib\n for aAttribute in constants.XmlValidation:\n rootAttribDict.update(aAttribute)\n \n # create metadata nodes\n meta = Element(constants.XmlElementMetadata)\n root.append(meta)\n metaName = Element(constants.XmlElementMetaname)\n metaName.text = str(self.MetadataNameLineEdit.text())\n meta.append(metaName)\n metaDesc = Element(constants.XmlElementMetadescription)\n metaDesc.text = str(self.MetadataDescriptionTextEdit.toPlainText())\n meta.append(metaDesc)\n\n # add the coefficients text and coefficients\n coeffText = etree.Comment(\"\"\" \n * The coefficients node contains coefficients to be assigned to values.\n \n * REQUIRED ATTRIBUTES\n * Id - text, unique identifier\n * Name - text, word or phrase describing coefficient\n * fieldName - text, name of field to be created for output\n * method - text, \"P\" or \"A\", designates \"P\"ercentage or per unit \"A\"rea calculation routine\n \"\"\")\n root.append(coeffText)\n coeffs = Element(constants.XmlElementCoefficients)\n \n for coef in self.tempLccObj.coefficients:\n tempid = self.tempLccObj.coefficients[str(coef)].coefId\n tempname = self.tempLccObj.coefficients[str(coef)].name\n tempfieldname = self.tempLccObj.coefficients[str(coef)].fieldName\n tempCalcMethod = self.tempLccObj.coefficients[str(coef)].calcMethod\n coeff = Element(constants.XmlElementCoefficient, Id=tempid, Name=tempname, fieldName=tempfieldname, method=tempCalcMethod)\n coeffs.append(coeff)\n root.append(coeffs)\n\n # add the value text and values\n# valText = etree.Comment(\"\"\"\n# * The \"values\" node defines the full set of values that can exist in a landcover raster\n# * The \"excluded\" attribute is used to exclude values from the total, excluded=false is the default\n# * Actual excluded values are always treated as excluded=true, cannot be used in classes, and should not be listed here. 
\n# \"\"\")\n valText = etree.Comment(\"\"\" \n * The values node defines the full set of values that can exist in a land cover raster.\n \n * REQUIRED ATTRIBUTES\n * Id - integer, raster code\n *\n * OPTIONAL ATTRIBUTES\n * Name - text, word or phrase describing value\n * excluded - boolean, \"true\" or \"false\" or \"1\" or \"0\"\n * - used to exclude values from effective area calculations\n * - excluded=false is the default \n \n * A value element can optionally contain one or more coefficient elements\n\n * REQUIRED COEFFICIENT ATTRIBUTES\n * Id - text, must match an Id attribute from a coefficients node element\n * value - decimal, weighting/calculation factor\n \"\"\")\n root.append(valText)\n values = Element(constants.XmlElementValues)\n # get the values\n for key in sorted(self.tempLccObj.values.iteritems(), key=operator.itemgetter(0)):\n valDict = {}\n valDict[constants.XmlAttributeId] = str(key[1].valueId)\n valDict[constants.XmlAttributeName] = str(key[1].name)\n if (key[1].excluded):\n valDict[constants.XmlAttributeNodata] = 'true'\n val = Element(constants.XmlAttributeValue, attrib=valDict)\n values.append(val)\n # get the coefficients for each value\n for coef in key[1]._coefficients:\n coefDict = {}\n coefDict[constants.XmlAttributeId] = key[1]._coefficients[str(coef)].coefId\n if key[1]._coefficients[str(coef)].value == 0.0:\n coefDict[constants.XmlAttributeValue] = \"0.0\"\n else:\n coefDict[constants.XmlAttributeValue] = str(key[1]._coefficients[str(coef)].value)\n if coefDict[\"value\"] == \"\":\n pass\n coefe = Element(constants.XmlElementCoefficient, attrib=coefDict)\n val.append(coefe)\n root.append(values)\n if self.tempLccObj.classes.topLevelClasses == None:\n indent(tree.getroot())\n return tree\n # add the class text and the class nodes\n# classText = etree.Comment(\"\"\"\n# * The \"classes\" node contains values grouped into classes.\n# * A class can contain either values or classes but not both types\n# * Values contain only an id which refers to a value in values node.\n# * The id attribute is used for the root of the field name in the output(for example %forest would be P + for = Pfor)\n# * Two classes with the same id are not allowed.\n# * Special class attributes:\n# - onSlopeVisible: Make available in \"On Slope\" metric category, default is false\n# - overwriteField: if present, it overides default \"Land Cover Proportions\" field name with the supplied value\n# \"\"\")\n classText = etree.Comment(\"\"\"\n * The classes node contains values from a land cover raster grouped into one or more classes.\n \n * REQUIRED ATTRIBUTES\n * Id - text, unique identifier, also used for automated generation of output field name\n \n * OPTIONAL ATTRIBUTES\n * Name - text, word or phrase describing class\n * filter - text, a string of one or more tool name abbreviations separated by a \";\"\n * - possible abbreviations are: lcp, rlcp, lcosp, splcp, and caem\n * - used to exclude the class from the selectable classes in the tool's GUI\n * xxxxField - text, overrides ATtILA-generated field name for output\n * - where xxxx equals a tool name abbreviation\n * - possible abbreviations are: lcp, rlcp, lcosp, splcp, and caem\n * - a separate xxxxField attribute can exist for each tool\n\n * A class can contain either values or classes but not both types.\n * Value elements contain only an Id attribute which refers to a value in a raster.\n * Values tagged as excluded=\"true\" in the values node should not be included in any class.\n \"\"\")\n root.append(classText)\n classes = 
Element('classes')\n root.append(classes)\n # function to find child classes of the parent classes\n def printDescendentClasses(landCoverClass, classE):\n# self.logProgress(stack()[0][3])\n\n if landCoverClass.childClasses:\n for childClass in landCoverClass.childClasses:\n assert isinstance(childClass, lcc.EditorLandCoverClass)\n # childClass\n clasDict = {}\n clasDict[constants.XmlAttributeId] = str(childClass.classId)\n clasDict[constants.XmlAttributeName] = str(childClass.name)\n for field in self.tempLccObj.overwriteFieldsNames:\n if childClass.classoverwriteFields[field]:\n clasDict[field] = childClass.classoverwriteFields[field]\n clasDict[constants.XmlAttributeFilter] = \"\" \n childClas = Element(constants.XmlElementClass, attrib=clasDict)\n classE.append(childClas)\n for childValueId in sorted(childClass.childValueIds):\n if self.tempLccObj.values[childValueId].excluded:\n continue\n childVal = Element(constants.XmlElementValue, Id=str(childValueId))\n childClas.append(childVal)\n printDescendentClasses(childClass, childClas)\n else:\n return\n for clas in self.tempLccObj.classes.topLevelClasses:\n clasDict = {}\n clasDict[constants.XmlAttributeId] = str(clas.classId)\n clasDict[constants.XmlAttributeName] = str(clas.name)\n for field in self.tempLccObj.overwriteFieldsNames:\n if clas.classoverwriteFields[field]:\n clasDict[field] = clas.classoverwriteFields[field]\n clasDict[constants.XmlAttributeFilter] = \"\"\n classE = Element(constants.XmlElementClass, attrib=clasDict)\n classes.append(classE)\n for childValueId in clas.childValueIds:\n if self.tempLccObj.values[childValueId].excluded:\n continue\n childVal = Element(constants.XmlElementValue, Id=str(childValueId))\n classE.append(childVal)\n printDescendentClasses(clas, classE) \n\n # indent the output correctly and the write to file\n indent(tree.getroot())\n \n\n self.logProgress(stack()[0][3] + \" END\")\n\n return tree"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
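The build_test_xml entry above hinges on a parse, filter relevant <e> entries, rewrite pattern. Below is a self-contained sketch of just that pattern with xml.etree.ElementTree; the miniature dictionary string and the set of relevant headwords are invented, and the real pipeline decides relevance via FST analyses and lemma matching rather than a hard-coded set:

import xml.etree.ElementTree as ET

# Hypothetical miniature dictionary shaped like crkeng.xml.
SAMPLE = (
    '<r>'
    '<source id="MD"/>'
    '<e><lg><l pos="N">atim</l></lg></e>'
    '<e><lg><l pos="N">minos</l></lg></e>'
    '</r>'
)
relevant = {"atim"}  # headwords deemed relevant (invented)

root = ET.fromstring(SAMPLE)
out_root = ET.Element("r")
# Keep all <source> elements, then only the <e> entries whose <l> text is relevant.
for source in root.findall(".//source"):
    out_root.append(source)
for entry in root.findall(".//e"):
    l_elem = entry.find(".//l")
    if l_elem is not None and l_elem.text in relevant:
        out_root.append(entry)

ET.ElementTree(out_root).write("crkeng_test_sample.xml", encoding="utf-8", xml_declaration=True)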
Update the config file | def update(self):
self.save_config_file() | [
"def conf_update(self):\n pass",
"def update_config():\n config.update_config(config.usr_config_file, config.def_config_file)",
"def config_edit(self):\n Tools.file_edit(self.config_file_path)",
"def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)",
"async def _update_config(self):\n if self.config['data'] is None or self.config_expired:\n data = await self.get_data(self.url_builder('configuration'))\n self.config = dict(data=data, last_update=datetime.now())",
"def __update(self):\n if self.__file:\n target_file = open(self.__file)\n for attr in dir(self):\n if not attr.startswith(\"_\") and \\\n (self.__overwrite or (attr not in self.__exclude)) \\\n and not self.__is_attr_callable(attr):\n try:\n delattr(self, attr)\n except AttributeError:\n pass\n pool = yaml.load(target_file)\n target_file.close()\n if pool: # could be None\n for key, val in pool.iteritems():\n if not key.startswith(\"_\") and \\\n (self.__overwrite or (key not in self.__exclude)) \\\n and not self.__is_attr_callable(key):\n setattr(self, key, val)\n if hasattr(self, 'log_config_file_changes')\\\n and self.log_config_file_changes:\n logging.getLogger(__name__).info(\"Config file has updated.\")",
"def update_config(self, data):\n self.config.data = dict_merge(self.config.data, data)\n self.config.save()",
"def update_config(config, config_file=CONFIG_FILE):\n yaml.dump(config, open(config_file, \"w\"), default_flow_style=False)\n log.info(\"Updated config in %s\" % CONFIG_FILE)",
"def updateFile(self):",
"def editConfigFile(self):\r\n pass #Will be defined at a later time\r",
"def update_conf_file():\n filepath = remote_dir + \"/apache2/conf/httpd.conf\"\n fabric.contrib.files.sed(filepath, 'myproject', project_name)",
"def reload_config(self):\n pass",
"def _refreshconfig(self):\n self.config = ConfigGenerator(os.path.join(self.rundir, const.CONFIG_FILE))",
"def write_config(self, data):\n debug('Writing FAUCET config')\n # Write configuration file\n with open(self.path, 'w') as config:\n config.write(data)\n # Verify it was written properly\n with open(self.path) as config:\n newdata = config.read()\n if newdata != data:\n raise IOError(\n 'Configuration file %s not written properly.' % self.path)\n # Tell FAUCET to reload its configuration\n self.reload(config=data)",
"def use_config_file(self):\n self.config_file = self.find_config_file()\n if self.config_file:\n self.apply_config_file(self.config_file)",
"def update_from_file(self, config):\n try:\n with open(config) as config_file:\n data = yaml.safe_load(config_file)\n if not isinstance(data, dict):\n data = {}\n\n self.forklimit = data.get(\"forklimit\", self.forklimit)\n self.print_machines = data.get(\"print_machines\",\n self.print_machines)\n self.concurrent = data.get(\"concurrent\", self.concurrent)\n self.timeout = data.get(\"timeout\", self.timeout)\n\n plugin_dirs = data.get(\"plugin_dirs\", [])\n if isinstance(plugin_dirs, basestring):\n plugin_dirs = [plugin_dirs]\n self.plugin_dirs.update(plugin_dirs)\n\n hooks = data.get(\"hooks\", [])\n if isinstance(hooks, basestring):\n hooks = [hooks]\n self.hooks.update(hooks)\n\n # It's okay to ignore files that don't exist.\n except IOError:\n pass\n except (yaml.parser.ParserError, yaml.scanner.ScannerError) as err:\n raise ConfigError(\n \"Invalid Configuration (%s): %s\" % (config, err.problem))",
"def reload(self):\n self.read(self._cfg_path)",
"def testUpdateConfigFile(self):\n # Test update project field.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n self.assertEqual(cfg.ssh_private_key_path, \"\")\n # Test add ssh key path in config.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path,\n \"ssh_private_key_path\", \"test_path\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n self.assertEqual(cfg.ssh_private_key_path, \"test_path\")\n # Test config is not a file\n with mock.patch(\"os.path.isfile\") as chkfile:\n chkfile.return_value = False\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")",
"def update_config_(\n config_orig: DictConfig, path: str, update_key: str\n) -> None:\n config = read_config(path)\n config_update = OmegaConf.update(config_orig, update_key, config, force_add=True)\n return config_update"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
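The update entry above shows only a one-line delegation to save_config_file. A hedged sketch of what such a pair might look like around a JSON-backed settings dict; the Config class, its path, and its fields are all invented:

import json
from pathlib import Path

class Config:
    """Invented stand-in showing the update -> save_config_file delegation."""

    def __init__(self, path="config.json"):
        self.path = Path(path)
        self.data = {}

    def save_config_file(self):
        # Persist the in-memory settings to disk as JSON.
        self.path.write_text(json.dumps(self.data, indent=2))

    def update(self):
        # Mirror the document above: updating means re-saving the config file.
        self.save_config_file()

cfg = Config()
cfg.data["theme"] = "dark"
cfg.update()  # writes ./config.json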
Fill column2 with the most likely values based on column1. | def fillgaps(column1,column2,train,test):
    # For each distinct value of column1 (pooled from train and test rows where
    # both columns are present), record the mean and std of column2.
    ddict = {}
    d1 = test[[column1, column2]].dropna().values
    d2 = train[[column1, column2]].dropna().values
    c1 = np.array(d1[:, 0].tolist() + d2[:, 0].tolist())
    c2 = np.array(d1[:, 1].tolist() + d2[:, 1].tolist())
    for ic1 in np.unique(c1):
        ddict[ic1] = (c2[c1 == ic1].mean(), c2[c1 == ic1].std())
    # Impute missing column2 entries: use the group mean when the spread is
    # degenerate, otherwise sample from a normal with the group's mean and std.
    full_data = [train, test]
    for dataset in full_data:
        for missing in np.where(np.isnan(dataset[column2]))[0]:
            m, s = ddict[dataset[column1][missing]]
            if s <= 0:
                dataset[column2][missing] = m
            else:
                dataset[column2][missing] = np.random.normal(loc=m, scale=s, size=1)
return (train,test) | [
"def _fill_col1_val_where_col2_notna(col1, col2, val):\n fill_ser = col1.copy()\n fill_ser[col2.notna()] = val\n return col1.fillna(fill_ser)",
"def merge(col1, col2):\n\n new = copy.deepcopy(col1)\n\n if \"isSym\" in col1:\n for x, n in col2[\"has\"].items():\n update.add(new, x, n)\n else:\n for n in col2[\"has\"].values():\n update.add(new, n)\n\n new[\"lo\"] = min(col1[\"lo\"], col2[\"lo\"])\n new[\"hi\"] = max(col1[\"hi\"], col2[\"hi\"])\n\n return new",
"def fill_column_1(self):\n self.report.loc[:,self.fields[\"1\"]] = self.data_bucket.shareclass_isin",
"def fill_col(col, x):\n col.append(x)\n return col",
"def fillna_list(data_frame, column_name):\n g_fun = lambda x: []\n mask = data_frame[column_name].isnull()\n data_frame.loc[mask,[column_name]] = (data_frame.loc[mask,column_name]\n .apply(g_fun))\n return data_frame",
"def merged(col1, col2, nSmall, nFar):\n\n new = merge(col1, col2)\n\n return new if (nSmall and col1[\"n\"] < nSmall or col2[\"n\"] < nSmall) or (nFar and not \"isSym\" in col1 and abs(query.mid(col1) - query.mid(col2)) < nFar) or (query.div(new) <= (query.div(col1) * col1[\"n\"] + query.div(col2) * col2[\"n\"]) / new[\"n\"]) else None",
"def _fill_in_the_gaps(\n dataframe: pd.DataFrame, collapsed_blocks: pd.DataFrame, header: str\n ) -> None:\n columns: Dict[str, List[str]] = defaultdict(list)\n for start, end in zip(collapsed_blocks.index, collapsed_blocks.index[1:]):\n columns[header].extend([dataframe[header][start]] * (end - start))\n # Handle the tail of the column\n delta = len(dataframe.index) - len(columns[header])\n dataframe[header] = (\n columns[header] + [dataframe[header][collapsed_blocks.index[-1]]] * delta\n )",
"def fill_column_2(self):\n self.report.loc[:,self.fields[\"2\"]] = \\\n int(self.data_bucket.get_shareclass_infos(\"type_tpt\"))",
"def fill_missing_values (DF):\r\n DF = DF.fillna(method = \"ffill\")\r\n DF = DF.fillna(method = \"bfill\")\r\n return (DF)",
"def merge_two_rows(row1, row2):\n\n for k, v in row2.items():\n if k not in row1.keys() or not row1[k]:\n row1[k] = v\n\n return row1",
"def fill_empty_cell(puzzle: list, y_axis: int, x_axis: int, list_values: list) -> tuple:\n next_value = column_value_chooser(puzzle, y_axis, x_axis, list_values)\n list_values.remove(next_value)\n return next_value, list_values",
"def _fall_once(self, row: int, col: int) -> None:\r\n self._require_valid_row_num(row)\r\n self._require_valid_col_num(col)\r\n if self._is_empty_below(row, col):\r\n self.field[row][col], self.field[row+1][col] = self.field[row+1][col], self.field[row][col]",
"def _forward_fill(data: np.ndarray):\n last_values = None\n\n for row in data:\n if last_values is not None:\n # Get NaN values index\n idx = np.isnan(row)\n # Fill NaN values using last seen values\n row[idx] = last_values[idx]\n\n # Update last seen values\n last_values = row\n\n return data",
"def backfill(arr, arr1):\n \n arr = np.where(arr < 0.01, np.NaN, arr)\n # FIXME:\n # RuntimeWarning: invalid value encountered in less\n # arr = np.where(arr < 0.01, np.NaN, arr)\n\n x = np.isnan(arr1)\n arr1[x] = arr[x]\n return arr1",
"def switchColumn(data_file, column1, column2):\n\tdata = []\n\tfor dataLine in readData(data_file):\n\t\ttmp = dataLine[column1-1]\n\t\tdataLine[column1-1] = dataLine[column2-1]\n\t\tdataLine[column2-1] = tmp\n\t\tdata.append(dataLine)\n\twriteData(data_file, data)",
"def fillna(df, col: str, forward: bool):\n na_prev = len(df)\n report = f'fillna(\"{col}\") ' + ('forward' if forward else 'backward') + ' NA count:'\n while True:\n na = df[col].isna().sum()\n report += f' {na}'\n if na == na_prev or na == 0: break\n na_prev = na\n # df must to be sorted by (ABI, YEAR)\n df.loc[df[col].isna(), col] = df.groupby('ABI')[col].shift(1 if forward else -1)",
"def _merge_cells(row):\n new_row = []\n last = 0\n span = 1 # the minimum\n for i in range(1, len(row), 1):\n if row[i-1] != row[i]:\n new_row.append(((last,last+span), row[i-1]))\n last=i\n span=1\n continue\n span += 1\n \n new_row.append(((last,last+span), row[i]))\n return new_row",
"def _nullEmptyColumns(self, col, bcol):\n if isinstance(col, (LongArrayColumn, DoubleArrayColumn)):\n col.values = [x if y else []\n for (x, y) in izip(col.values, bcol.values)]\n else:\n col.values = [x if y else None\n for (x, y) in izip(col.values, bcol.values)]",
"def fill(dataset):\n from ..data import Dataset\n from numpy import isnan, where, arange, maximum, nonzero\n\n if not isinstance(dataset, Dataset):\n raise TypeError(\"Dataset has to be class core.data.dataset.Dataset\")\n\n data = dataset.data\n\n for _ in range(2):\n mask = isnan(data.T)\n idx = where(~mask, arange(mask.shape[1]), 0)\n maximum.accumulate(idx, axis=1, out=idx)\n data.T[mask] = data.T[nonzero(mask)[0], idx[mask]]\n data = data[::-1]\n\n dataset.change_values(data)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
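A minimal usage sketch for the fillgaps document above (assumptions: the function is already defined in scope with numpy/pandas imported, and the "Pclass"/"Age" toy data below are invented for illustration):

import numpy as np
import pandas as pd

train = pd.DataFrame({"Pclass": [1, 1, 2, 2, 3, 3],
                      "Age": [40.0, 42.0, 30.0, np.nan, 22.0, np.nan]})
test = pd.DataFrame({"Pclass": [1, 2, 3],
                     "Age": [np.nan, 29.0, 25.0]})

# Missing ages are filled per Pclass group, using that group's mean/std.
train, test = fillgaps("Pclass", "Age", train, test)
print(train["Age"].isna().sum(), test["Age"].isna().sum())  # 0 0 once filled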
Returns true if player has 3 of spades in their hand. | def has_3_spades(self):
if Card('3', 'spades') in self.hand:
return True
return False | [
"def three_of_a_kind(hand):\r\n s = [n for n,h in hand]\r\n s.sort()\r\n status = 0\r\n for i in xrange(len(s)):\r\n if s.count(s[i]) >= 3:\r\n status = 1\r\n break\r\n return bool(status)",
"def is_three_of_a_kind(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] == 3:\n return (True, cards[c])\n return None",
"def is_three_of_a_kind(hand):\n\tis_a_three_of_a_kind = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 3:\n\t\t\tis_a_three_of_a_kind = True\n\t\ti += 1 \n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_three_of_a_kind == True:\n\t\tif hand[j] == 3 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_three_of_a_kind:\n\t\treturn True, high_card\n\telse:\n\t\treturn False",
"def free_hand(hand):\n return len(hand) < 3",
"def full_house(hand):\r\n s = [n for n,h in hand]\r\n if three_of_a_kind(hand)and len(set(s))==2:\r\n return True\r\n else:\r\n return False",
"def has_four_of_a_kind(self):\r\n for _, count in self.known_cards.items():\r\n if count == 4:\r\n return True\r\n return False",
"def four_of_a_kind(hand):\r\n s = [s1 for s1,s2 in hand]\r\n for i in xrange(len(s)):\r\n if s.count(s[i]) ==4:\r\n return True\r\n return False",
"def has_pair(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val == 2:\n self.rank_per_hand['0'] = \"pair\"\n return True\n return False",
"def two_pair(hand):\r\n s = [n for n,h in hand]\r\n if not three_of_a_kind(hand)and len(set(s))== 3:\r\n return True\r\n else:\r\n return False",
"def is_3flush(holecards, flop, required_holecards=2):\n assert 0 <= required_holecards <= 2\n suit1, suit2 = [card.suit for card in holecards]\n hand = tuple(chain(holecards, flop))\n suit_counts = Counter([card.suit for card in hand])\n\n for suit in suit_counts:\n if suit_counts[suit] == 3:\n if required_holecards == 2 and (suit1 == suit2 == suit):\n return True\n elif required_holecards == 1:\n if (suit1 == suit or suit2 == suit):\n return True\n elif required_holecards == 0:\n return True\n return False",
"def isFullHouse(hand):\n if isPair(hand) and isThree(hand):\n if getPair(hand) != getThree(hand):\n return True\n return False",
"def still_in_hand(self):\n return len(self.hand.cards)!=0",
"def is_soft_hand(self):\n is_soft = False\n for i in self.cards:\n if i.value == 'ACE':\n is_soft = True\n\n return is_soft",
"def is_3straight(holecards, flop, required_holecards=2):\n assert 0 <= required_holecards <= 2\n rank1, rank2 = sorted_numerical_ranks(holecards)\n hand = tuple(chain(holecards, flop))\n\n for subseq in rank_subsequences(hand):\n x, y, z = subseq\n if x == y-1 == z-2:\n if x == 1:\n # Special case for Ace playing low, to allow\n # for the `rank in subseq` check to work\n subseq.append(14)\n if required_holecards == 2:\n if rank1 in subseq and rank2 in subseq:\n return True\n elif required_holecards == 1:\n if rank1 in subseq or rank2 in subseq:\n return True\n elif required_holecards == 0:\n return True\n return False",
"def is_daisangen(self, hand):\n count_of_dragon_pon_sets = 0\n for item in hand:\n if is_pon(item) and item[0] in [CHUN, HAKU, HATSU]:\n count_of_dragon_pon_sets += 1\n return count_of_dragon_pon_sets == 3",
"def has_threekind(self):\n if len(self.ranks) == 0:\n self.rank_hist()\n for val in self.ranks.values():\n if val >= 3:\n return True\n return False",
"def has_won(self):\n coders_card = self.get_coders().get_amount()\n if coders_card > 3:\n return True\n else:\n return False",
"def is_four_of_a_kind(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] == 4:\n return (True, cards[c])\n return None",
"def is_set(self, card1, card2, card3):\n if (\n (card1.color + card2.color + card3.color) % 3 == 0 and\n (card1.shape + card2.shape + card3.shape) % 3 == 0 and\n (card1.shading + card2.shading + card3.shading) % 3 == 0 and\n (card1.number + card2.number + card3.number) % 3 == 0):\n return True\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
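For illustration only, a hypothetical minimal Card/Player pair showing why the membership test in the document above depends on Card comparing by value (here a frozen dataclass, which auto-generates __eq__; the class names are made up, not the original code):

from dataclasses import dataclass

@dataclass(frozen=True)
class Card:
    rank: str
    suit: str

class Player:
    def __init__(self, hand):
        self.hand = hand

    def has_3_spades(self):
        # "in" uses Card.__eq__, so equal rank/suit matches by value.
        return Card('3', 'spades') in self.hand

print(Player([Card('3', 'spades'), Card('K', 'hearts')]).has_3_spades())  # True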
Return all components that match the given type and filter | def queryComponent(type=None, filter=None, all=0): | [
"def find(self, type=None, filter=None):\n if isinstance(type, (tuple, list)):\n types = type\n else:\n types = [type] if type else self._types.keys()\n\n for type in types:\n for addon in self._addons[type].itervalues():\n if filter and not filter(addon):\n continue\n yield addon",
"def get_components_by_type(\n self, component_type: Union[type, Tuple[type, ...]]\n ) -> List[Any]:\n return [c for c in self._components if isinstance(c, component_type)]",
"def filter(self, filters):",
"def get_components(self,filt):\n comps = [self.components[i] for i in xrange(len(self.header)) if filt == self.header[i]]\n return comps",
"def get_components_by_type(\n self, component_type: Union[type, Tuple[type, ...]]\n ) -> List[Any]:\n return self._manager.get_components_by_type(component_type)",
"def search_by_type(self, beertype):\n data = []\n for brand in self.brands:\n data = data + brand.filter_by_type(beertype)\n return data",
"def getFilter(self, type: int) -> int:\n ...",
"def filter_by_types(self, types: List[Hashable]) -> \"ModinDataframe\":\n pass",
"def get_entities_by_component_type(component_type):\n entitiesList = []\n for entityID in components:\n if component_type in components[entityID]:\n entitiesList.append(entities[entityID])\n return entitiesList",
"def _filter_components(components, include_components):\n include_components = {\n name.lower().replace('_', '') for name in include_components\n }\n return [\n component for component in components\n if component.__class__.__name__.lower() in include_components\n ]",
"def search_filter(query_params, query):\n if query_params.get('type') is not None:\n query = query.filter(search.c.kind == query_params.get('type'))\n return query",
"def filter_evaluations_by_type(self, type_):\n from .evaluation import Evaluation\n from .code_component import CodeComponent\n\n joined_eval = join(\n Evaluation.t, CodeComponent.t,\n ((Evaluation.m.trial_id == CodeComponent.m.trial_id) &\n (Evaluation.m.code_component_id == CodeComponent.m.id))\n )\n joined = join(\n Activation.t, joined_eval,\n ((Evaluation.m.trial_id == Activation.m.trial_id) &\n (Evaluation.m.activation_id == Activation.m.id))\n )\n query = (\n select([CodeComponent.m.name, Evaluation.m.repr])\n .select_from(joined)\n .where((Activation.m.trial_id == self.trial_id) &\n (Activation.m.id == self.id) &\n (CodeComponent.m.type == type_))\n )\n for result in relational.session.execute(query):\n yield result",
"def type_filter(self, items, types=None):\n if not types:\n return items\n allowed_items = []\n for item in items:\n if item.portal_type not in types:\n continue\n allowed_items.append(item)\n return allowed_items",
"def filter_by_type(lst, acceptedtype):\n return _filter(lst, lambda x: isinstance(x, acceptedtype))",
"def _FilterForImageType(artifacts, image_type):\n return [i for i in artifacts if i.image_type == image_type]",
"def filter_on_types(obj, types, matches):\n if INSTANCE_OF_P in obj.get().get('claims'):\n # print 'claims:', obj.get().get('claims')[u'P31']\n values = obj.get().get('claims')[INSTANCE_OF_P]\n for v in values:\n # print u'val:', v.getTarget()\n if v.getTarget().title() in types:\n matches.append(obj)",
"def get_filters(self):",
"def filter_by_type(self, beertype):\n data = []\n for beer in self.beers:\n if beer.beertype == beertype:\n data.append(beer)\n return data",
"def _scan(cls, comms, type=None, lights=None):\n\n d = comms.get('groups/')\n group_ids = []\n for key, value in d.items():\n if (type is None) or (value['type'] == type):\n group_ids.append(key)\n return [cls(comms, group_id, lights) for group_id in group_ids]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
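The document above is only a signature, so here is a hedged sketch of one plausible behaviour rather than the original implementation; the _components registry below is invented for illustration, and the "all" flag is read as "return every match instead of just the first".

_components = []  # hypothetical global registry of component instances

def queryComponent(type=None, filter=None, all=0):
    # Match on class when a type is given, then apply the optional predicate.
    matches = [c for c in _components
               if (type is None or isinstance(c, type))
               and (filter is None or filter(c))]
    return matches if all else (matches[0] if matches else None)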
checkKey is used to check for authentication | def checkKey(self):
    # TODO: replace with a real API authentication check; for now just verify a key is set
if self.apikey is None:
return False
else:
return True | [
"def check_auth_publickey(self, username, key):\n return AUTH_FAILED",
"def api_key_check():\n req_path = request.path\n method_type = request.method\n app.logger.info(\">>> path = {}, method = {}\".format(req_path, method_type))\n\n if not app_props.api_key_check:\n app.logger.debug('>>> api key check closed')\n return None\n\n if req_path in app_props.api_key_white_list:\n app.logger.info('>>> {} in white list, pass'.format(req_path))\n return None\n headers = request.headers\n api_key_from_req = headers.get('x-api-key')\n if not api_key_from_req:\n app.logger.debug('>>> enter api-key error')\n return resp_json(BaseResp.err('no x-api-key header'))\n\n key_obj = Key.query.filter_by(api_key=api_key_from_req).first()\n if key_obj:\n app.logger.debug('>>> consumer_id = {}, secret_key = {}'.format(key_obj.consumer_id, key_obj.secret_key))\n g.consumer_id = key_obj.consumer_id\n g.secret_key = key_obj.secret_key\n return None\n else:\n return resp_json(BaseResp.err('Err api key'))",
"def get_key(self, user, api_key):\n\n if user:\n if user.validate_api_key(api_key):\n return True\n return False",
"def validate_key(self, key):\n\t\treturn key",
"def check_ssh_key(self):\n return True",
"def verified(self):\r\n \r\n key = self.key\r\n f = Fernet(key) # set the key in Fernet\r\n # open key file\r\n truepass = self.__getAccesskeyfromShelf()\r\n if truepass == self.strpass:\r\n return True\r\n \r\n else:\r\n return False",
"def check_key(self, path: str) -> bool:",
"def checkAuthentication():\n if app.oidcClient is None:\n return\n if flask.request.endpoint == 'oidcCallback':\n return\n key = flask.session.get('key') or flask.request.args.get('key')\n if app.tokenMap.get(key) is None:\n if 'key' in flask.request.args:\n raise exceptions.NotAuthenticatedException()\n else:\n return startLogin()",
"def _ebCheckKey(self, reason, packet):\n reason.trap(error.ValidPublicKey)\n # if we make it here, it means that the publickey is valid\n self.transport.sendPacket(MSG_USERAUTH_PK_OK, packet)\n return failure.Failure(error.IgnoreAuthentication())",
"def check_key(request):\n try:\n access_key = request.session.get('access_key_tw', None)\n if not access_key:\n return False\n except KeyError:\n return False\n return True\n\n\t# User info",
"def get_api_key_checker(key) -> callable:\n def check_api_key():\n requested_key = request.headers.get('api-key')\n if key != requested_key:\n raise Forbidden('API key is not valid!')\n\n return check_api_key",
"def check_key(request):\n try:\n access_key = request.session.get('access_key_tw', None)\n if not access_key:\n return False\n except KeyError:\n return False\n return True",
"def remote_verifyKey(self, key, protocol):\r\n if self._authenticated.called:\r\n return Failure(InvalidKey('Only one guess is possible.'))\r\n\r\n if isinstance(protocol, Failure):\r\n self._authenticated.errback(protocol)\r\n else:\r\n if self._key != key:\r\n e = Failure(InvalidKey('Wrong key supplied.'))\r\n self._authenticated.errback(e)\r\n return e\r\n\r\n self._authenticated.callback(protocol)",
"def check_key(request):\n\ttry:\n\t\taccess_key = request.session.get('access_key_tw', None)\n\t\tif not access_key:\n\t\t\treturn False\n\texcept KeyError:\n\t\treturn False\n\treturn True",
"def get_key(self, user, api_key):\r\n from delicious_cake.models import ApiKey\r\n\r\n try:\r\n ApiKey.objects.get(user=user, key=api_key)\r\n except ApiKey.DoesNotExist:\r\n return self._unauthorized()\r\n\r\n return True",
"def ensure_key(self, key):\r\n if not self.check(key):\r\n self.create_key(key)\r\n return True\r\n return False",
"def test_check_auth_ok_key(client):\n\n auth_response = client.post('/NY/checkauth', headers={'X-AUTH': 'WRITE'})\n\n assert(auth_response.status_code == 200)\n assert(auth_response.is_json)\n assert(auth_response.get_json() == {'success': True})",
"def checkkeys(self):\n headers = self.baseheaders(pubkey=True)\n self.log(\"Clientapi checkkeys exchangekeys sending headers pprint follows\")\n spubkey, suuid = self.serverkeypair.get_pub_key()\n self.log('checkkeys server pub %s uuid %s' % (limitlines(spubkey), suuid))\n spubkey_b64 = base64.b64encode(spubkey)\n if suuid:\n suuid_server_b64 = base64.b64encode(suuid)\n headers['X-API-Serveruuid'] = suuid_server_b64\n else:\n headers['X-API-Serveruuid'] = ''\n headers['X-API-Serverpubkey'] = spubkey_b64\n\n url = '%s/checkkeys' % (self.baseurl)\n self.log(\"Clientapi about to fetch with headers : %s\" % (pformat(headers)))\n jdata = fetch_json(url, headers=headers, data=None, method='POST')\n self.log(\"Clientapi exchangekeys jdata returned : %s\" % (jdata))\n\n if 'status' in jdata:\n status = jdata['status']\n self.log('checkkeys status is %s' % (status))\n if status == 'serverkey':\n # jdata['server_pubkey'] is the base64 version of the public key\n server_pubkey = base64.b64decode(jdata['server_pubkey'])\n server_uuid = base64.b64decode(jdata['server_uuid'])\n self.log('exchangekeys server_pubkey is \"%s\" server_uuid is \"%s\"' %\n (limitlines(server_pubkey), server_uuid))\n self.serverkeypair.initafter(server_pubkey, server_uuid)\n retval = False\n elif status == 'ok':\n retval = True\n else:\n retval = False\n return retval",
"def valid_api_key(self, key):\n try:\n res = self.api_keys[key]\n except KeyError:\n res = False\n return res"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
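A hedged variant sketch, not the original code: if empty or whitespace-only keys should also fail, the None test above could be tightened as below (ApiClient is a made-up holder class for the key, used only so the example runs standalone).

class ApiClient:
    def __init__(self, apikey=None):
        self.apikey = apikey

    def checkKey(self):
        # Treat None, empty, and whitespace-only keys as unauthenticated.
        return bool(self.apikey and str(self.apikey).strip())

print(ApiClient("abc123").checkKey())  # True
print(ApiClient("   ").checkKey())     # False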
make the cosmos and DES meds files | def make_all_cosmos_des(run, cosmos_config, des_config, catfile, tileid):
flist = files.get_cosmos_flist(tileid)
cosmos_meds = files.get_meds_file(run, tileid, 'cosmos','i')
print('making cosmos MEDS:',cosmos_meds)
maker = CosmosMEDSMaker(
config_path=cosmos_config,
catname=catfile,
flistname=flist,
)
maker.write(cosmos_meds)
for band in ['u','g','r','i','z']:
band_flist = files.get_des_flist(band)
band_meds = files.get_meds_file(run, tileid, 'des',band)
print('making DES MEDS:',band_meds)
maker = CosmosMEDSMaker(
config_path=des_config,
catname=cosmos_meds,
flistname=band_flist,
)
maker.write(band_meds) | [
"def writeNMD(filename, modes, atoms, zeros=False):\n\n if not isinstance(modes, (NMA, ModeSet, Mode, Vector)):\n raise TypeError('modes must be NMA, ModeSet, Mode, or Vector, '\n 'not {0}'.format(type(modes)))\n if modes.numAtoms() != atoms.numAtoms():\n raise Exception('number of atoms do not match')\n out = openFile(addext(filename, '.nmd'), 'w')\n\n #out.write('#!{0} -e\\n'.format(VMDPATH))\n out.write('nmwiz_load {0}\\n'.format(abspath(filename)))\n name = modes.getTitle()\n name = name.replace(' ', '_').replace('.', '_')\n if not name.replace('_', '').isalnum() or len(name) > 30:\n name = str(atoms)\n name = name.replace(' ', '_').replace('.', '_')\n if not name.replace('_', '').isalnum() or len(name) > 30:\n name = splitext(split(filename)[1])[0]\n out.write('name {0}\\n'.format(name))\n try:\n coords = atoms.getCoords()\n except:\n raise ValueError('coordinates could not be retrieved from atoms')\n if coords is None:\n raise ValueError('atom coordinates are not set')\n\n try:\n data = atoms.getNames()\n if data is not None:\n out.write('atomnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getResnames()\n if data is not None:\n out.write('resnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getResnums()\n if data is not None:\n out.write('resids ')\n data.tofile(out, ' ')\n out.write('\\n')\n except:\n pass\n try:\n data = atoms.getChids()\n if data is not None:\n out.write('chainids {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getSegnames()\n if data is not None:\n out.write('segnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n\n try:\n data = atoms.getBetas()\n if data is not None:\n out.write('bfactors ')\n data.tofile(out, ' ', '%.2f')\n out.write('\\n')\n except:\n pass\n\n format = '{0:.3f}'.format\n out.write('coordinates ')\n coords.tofile(out, ' ', '%.3f')\n out.write('\\n')\n count = 0\n if isinstance(modes, Vector):\n out.write('mode 1 {0:.2f} '.format(abs(modes)))\n modes.getNormed()._getArray().tofile(out, ' ', '%.3f')\n out.write('\\n')\n count += 1\n else:\n if isinstance(modes, Mode):\n modes = [modes]\n for mode in modes:\n if (mode.getEigval() < ZERO) and not zeros:\n continue\n elif (mode.getEigval() < ZERO) and zeros:\n out.write('mode {0} {1:.2f} '.format(\n mode.getIndex()+1, np.sqrt(1/(0.0001*(mode.getIndex()+1)))))\n else:\n out.write('mode {0} {1:.2f} '.format(\n mode.getIndex()+1, mode.getVariance()**0.5))\n arr = mode._getArray().tofile(out, ' ', '%.3f')\n out.write('\\n')\n count += 1\n if count == 0:\n LOGGER.warning('No normal mode data was written. '\n 'Given modes might have 0 eigenvalues.')\n out.close()\n return filename",
"def newDCM(meta_file, shape):\n\n fileName = \"template.dcm\"\n prefix = \"1.2.826.0.1.3680043.10.271.\"\n suffix = str(datetime.datetime.today())[:10].replace('-', '') + str(time.time()).replace('.', '')\n\n\n file_meta = Dataset()\n file_meta.MediaStorageSOPClassUID = \"1.2.840.10008.5.1.4.1.1.4\" # Standard SOP CLasses: MR Image Storage\n ds = FileDataset(fileName, {},\n file_meta=file_meta, preamble=b\"\\0\" * 128)\n ds.SeriesInstanceUID = prefix + suffix # change Series Instance UID\n\n ds.SOPClassUID = \"1.2.840.10008.5.1.4.1.1.4\" # Standard SOP CLasses: MR Image Storage\n ds.ImageType = ['ORIGINAL', 'PRIMARY', 'OTHER']\n ds.PatientPosition = \"HFS\"\n ds.Manufacturer = \"GE MEDICAL SYSTEMS\"\n ds.ManufacturerModelName = \"SIGNA EXCITE\"\n ds.PositionReferenceIndicator = \"SN\"\n ds.SliceThickness = 1\n\n # Set the transfer syntax\n ds.is_little_endian = True\n ds.is_implicit_VR = True\n ds.PixelData = bytes(0)\n ds.Rows = shape[0]\n ds.Columns = shape[1]\n ds.SamplesPerPixel = 1\n ds.PhotometricInterpretation = \"MONOCHROME2\"\n ds.SeriesDescription = \"_m2d\"\n ds.PixelSpacing = [1, 1]\n ds.BitsAllocated = 16\n ds.BitsStored = 16\n ds.HighBit = 15\n ds.PixelRepresentation = 1\n ds.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian\n\n with open(meta_file) as f:\n for line in f:\n index = line.rindex(\":\")\n key = line[: index].replace(' ', '').lower()\n value = line[index + 1:].strip()\n if (key == \"studydate\"):\n ds.StudyDate = value\n if (key == \"seriesdate\"):\n ds.SeriesDate = value\n if (key == \"patientbirthdate\"):\n ds.PatientBirthDate = value\n if (key == \"studytime\"):\n ds.StudyTime = value\n if (key == \"accessionnumber\"):\n ds.AccessionNumber = value\n if (key == \"studydescription\"):\n ds.StudyDescription = value\n if (key == \"seriesdescription\"):\n ds.SeriesDescription = value\n if (key == \"patientname\"):\n ds.PatientName = value\n if (key == \"patientid\"):\n ds.PatientID = value\n if (key == \"seriesnumber\"):\n ds.SeriesNumber = value\n if (key == \"patientsex\"):\n ds.PatientSex = value;\n\n return ds",
"def writeDataCards(opt,sigExp,bkgExp,shapesURL):\n\n #create a card per category\n dcList=[]\n for icat in range(len(opt.categs)):\n cat='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n dcTxt='%s/shapes-parametric.datacard_%s.dat'%(opt.output,cat)\n dcList.append(dcTxt)\n with open(dcTxt,'w') as dc:\n dc.write('#\\n')\n dc.write('# datacard was automatically generated with generateWorkspace.py\\n')\n dc.write('# the options passed are printed below\\n')\n dc.write('# %s\\n'%opt)\n dc.write('#\\n')\n dc.write('imax *\\n')\n dc.write('jmax *\\n')\n dc.write('kmax *\\n')\n dc.write('-'*50+'\\n')\n dc.write('shapes * * {0} $PROCESS_{1} $PROCESS_$SYSTEMATIC\\n'.format(shapesURL,cat))\n dc.write('shapes data_obs * {0} $PROCESS_{1}\\n'.format(shapesURL,cat))\n dc.write('-'*50+'\\n')\n dc.write('bin %s\\n'%cat)\n dc.write('observation -1\\n')\n dc.write('-'*50+'\\n')\n dc.write('%15s %15s %15s\\n'%('bin',cat,cat))\n dc.write('%15s %15s %15s\\n'%('process','sig','bkg'))\n dc.write('%15s %15s %15s\\n'%('process','0', '1'))\n dc.write('%15s %15s %15s\\n'%('rate','%3.2f'%sigExp[icat], '%3.2f'%bkgExp[icat]))\n dc.write('-'*50+'\\n')\n \n #float the background normalization as well as the signal\n dc.write('mu_bkg{0} rateParam {0} bkg 1\\n'.format(cat))\n\n #uncertainties\n dc.write('lumi %8s %15s %15s\\n'%('lnN','1.027','-'))\n dc.write('%s_sigShape %8s %15s %15s\\n'%(cat,'shape','1','-'))\n dc.write('%s_bkgShape %8s %15s %15s\\n'%(cat,'shape','-','1'))\n dc.write('{0} autoMCStats 0.0 1\\n'.format(cat))\n \n print '\\tshapes available @',shapesURL\n print '\\tgenerated the following datacards',dcList",
"def create_files(self):\n for n, file_name, output in zip(self.square_dimensions, self.file_names, self.outputs):\n with open(self.examples_folder + file_name, 'w') as f:\n f.write('c ' + file_name + ' : ' + ' '.join(self.prefix.split('_')).capitalize() + ' CNF file python generated.\\n')\n f.write(output)\n self._created_files = True",
"def write_conformers(self, filename): # ccids):\n cnt = 0\n for confId in range(self.nconf): #ccids:\n w = Chem.SDWriter('%s_c%03d.sdf'%(filename,cnt+1))\n w.write(self.mol, confId=confId)\n w.flush()\n w.close()\n cnt += 1",
"def Construct3DMolToFile(fileName,writeFile):\r\n # Writing sets of molecules\r\n \r\n\r\n w = Chem.SDWriter(writeFile)\r\n suppl = Chem.SDMolSupplier(fileName)\r\n mols = [x for x in suppl]\r\n for mol in mols:\r\n \t# print(mol.GetProp(\"Solvent\"))\r\n \t# print(mol.GetPropNames)\r\n \tsignal.signal(signal.SIGALRM, handler)\r\n \tsignal.alarm(100)\r\n \ttry:\r\n \t\tmol3d = GetMolFromMol(mol,dimension=3)\r\n \t\tw.write(mol3d)\r\n \texcept Exception:\r\n \t\tmol3d = mol\r\n \t\tw.write(mol3d)\r\n \t\t# print(mol.GetPropsAsDict())\r\n\r\n\r\n w.close()",
"def save(self):\n comm = self.mpi_topo.cartcomm\n if comm.Get_rank() == 0:\n self.h5file.close()\n self.xdmf_file = open(self.path + \"/\" + self.filename + \".xdmf\", \"w\")\n self.xdmf_file.write(\n \"\"\"<?xml version=\"1.0\" ?>\n<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>\n<Xdmf>\n <Domain>\n \"\"\"\n )\n if self.dim == 2:\n self.xdmf_file.write(\n \"\"\"\n <Grid Name=\"Structured Grid\" GridType=\"Uniform\">\n <Topology TopologyType=\"2DRectMesh\" NumberOfElements=\"{0}\"/>\n <Geometry GeometryType=\"VXVY\">\n \"\"\".format(\n \" \".join(map(str, self.global_size))\n )\n )\n else:\n self.xdmf_file.write(\n \"\"\"\n <Grid Name=\"Structured Grid\" GridType=\"Uniform\">\n <Topology TopologyType=\"3DRectMesh\" NumberOfElements=\"{0}\"/>\n <Geometry GeometryType=\"VXVYVZ\">\n \"\"\".format(\n \" \".join(map(str, self.global_size))\n )\n )\n for i in range(self.dim):\n self.xdmf_file.write(\n \"\"\"\n <DataItem Format=\"HDF\" Dimensions=\"{0}\">\n {1}:/x_{2}\n </DataItem>\n \"\"\".format(\n self.global_size[i], self.filename + \".h5\", i\n )\n )\n\n self.xdmf_file.write(\"</Geometry>\\n\")\n\n for k, v in self.scalars.items():\n self.xdmf_file.write(\n \"\"\"\n <Attribute Name=\"{0}\" AttributeType=\"Scalar\" Center=\"Node\">\n <DataItem Format=\"HDF\" Dimensions=\"{1}\">\n {2}\n </DataItem>\n </Attribute>\n \"\"\".format(\n k, \" \".join(map(str, self.global_size[::-1])), v\n )\n )\n\n for k, v in self.vectors.items():\n self.xdmf_file.write(\n \"\"\"\n <Attribute Name=\"{0}\" AttributeType=\"Vector\" Center=\"Node\">\n <DataItem Format=\"HDF\" Dimensions=\"{1} {2}\">\n {3}\n </DataItem>\n </Attribute>\n \"\"\".format(\n k, \" \".join(map(str, self.global_size[::-1])), self.dim, v\n )\n )\n\n self.xdmf_file.write(\"</Grid>\\n</Domain>\\n</Xdmf>\\n\")\n self.xdmf_file.close()",
"def demo_write():\n\n d8 = generate_structure(\n structure = 'diamond',\n cell = 'conv',\n )\n\n # Write an XYZ file\n d8.write('diamond8.xyz') # loses cell info, appropriate for molecules\n \n # Write an XSF file\n d8.write('diamond8.xsf')\n\n # Write a POSCAR file\n d8.write('diamond8.POSCAR')\n\n # Print the contents of each file\n print('\\nXYZ file:')\n os.system('cat diamond8.xyz')\n\n print('\\nXSF file:')\n os.system('cat diamond8.xsf')\n\n print('\\nPOSCAR file:')\n os.system('cat diamond8.POSCAR')",
"def _write_dmigs(self, bdf_file, size=8, is_double=False):\n msg = []\n for unused_name, dmig in sorted(self.dmig.items()):\n msg.append(dmig.write_card(size, is_double))\n for unused_name, dmi in sorted(self.dmi.items()):\n msg.append(dmi.write_card(size, is_double))\n for unused_name, dmij in sorted(self.dmij.items()):\n msg.append(dmij.write_card(size, is_double))\n for unused_name, dmiji in sorted(self.dmiji.items()):\n msg.append(dmiji.write_card(size, is_double))\n for unused_name, dmik in sorted(self.dmik.items()):\n msg.append(dmik.write_card(size, is_double))\n bdf_file.write(''.join(msg))",
"def generate_epics_db(self):\n if (self.verbose):\n # Generate digital application related databases and configuration files\n print(\"==================================================\")\n print(\"== Generating EPICS DB and configuration files: ==\")\n print(\"==================================================\")\n \n print(\"----------------------------\")\n print(\"-- Digital applications --\")\n print(\"----------------------------\")\n for app in self.digital_apps:\n app_path = '{}app_db/{}/{:04}/{:02}/'.format(self.dest_path, app[\"cpu_name\"], app[\"crate_id\"], app[\"slot_number\"])\n app_prefix = 'MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"])\n if (self.verbose):\n print(\"Application path : {}\".format(app_path))\n print(\"Application prefix : {}\".format(app_prefix))\n \n self.__write_dig_app_id_confg(path=app_path, macros={\"ID\":str(app[\"app_id\"])})\n\n # Add the IOC name environmental variable for the Link Nodes\n self.__write_header_env(path=app_path, macros={\"MPS_LINK_NODE\":app[\"link_node_name\"],\n \"MPS_DB_VERSION\":self.config_version,\n \"DATE\":datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S')})\n self.__write_iocinfo_env(path=app_path, macros={\"AREA\":app[\"link_node_area\"].upper(),\n \"LOCATION\":app[\"link_node_location\"].upper(),\n \"LOC_IDX\":app['link_node_location'].upper().replace('MP', ''),\n \"C_IDX\":unicode(app['card_index'])})\n if self.link_nodes[app[\"link_node_name\"]]['type'] == 'Digital':\n self.__write_prefix_env(path=app_path, macros={\"P\":app_prefix})\n self.__write_mps_db(path=app_path, macros={\"P\":app_prefix, \"THR_LOADED\":\"1\"})\n self.__write_app_id_config(path=app_path, macros={\"ID\":\"0\"}) # If there are no analog cards, set ID to invalid\n\n has_virtual = False\n for device in app[\"devices\"]:\n device_prefix = \"{}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"])\n\n if (self.verbose):\n print(\" Device prefix : {}\".format(device_prefix))\n\n for input in device[\"inputs\"]:\n\n if app[\"virtual\"]:\n has_virtual = True\n if (input[\"bit_position\"]>=32):\n scan = \".2 second\"\n if (input['name'] == 'WDOG'):\n if (\"MPSHEARTBEAT\" in input[\"input_pv\"]):\n scan = \".1 second\"\n channel = input[\"bit_position\"] - 32\n vmacros = { \"P\":input[\"input_pv\"]+'_THR',\n \"R\":input[\"name\"],\n \"N\":self.mps_name.getDeviceInputNameFromId(input[\"db_id\"]),\n \"INPV\":input[\"input_pv\"],\n \"ALSTATE\":str(input[\"alarm_state\"]),\n \"NALSTATE\":str(to_bool(not input[\"alarm_state\"])),\n \"ZSV\":input[\"zero_severity\"],\n \"OSV\":input[\"one_severity\"],\n \"BIT\":\"{:02d}\".format(channel).format,\n \"ZNAM\":input[\"zero_name\"],\n \"ONAM\":input[\"one_name\"], \n \"GID\":str(app[\"app_id\"]),\n \"SCAN\":scan}\n if (input['name'] == 'WDOG'):\n self.__write_virtual_wdog_db(path=app_path, macros=vmacros)\n else:\n self.__write_virtual_db(path=app_path, macros=vmacros)\n\n\n macros = { \"P\":device_prefix,\n \"R\":input[\"name\"],\n \"BIT\":input[\"bit_position\"],\n \"ZNAM\":input[\"zero_name\"],\n \"ONAM\":input[\"one_name\"] }\n\n if (self.verbose):\n print(\" Digital Input : {}\".format(input[\"name\"]))\n\n if (self.verbose):\n print(\"----------------------------\")\n\n print(\"==================================================\")\n print(\"\")\n\n # Generates analog application related databases and configuration files\n if (self.verbose):\n print(\"--------------------------\")\n print(\"-- Analog applications --\")\n 
print(\"--------------------------\")\n for app in self.analog_apps:\n app_path = '{}app_db/{}/{:04}/{:02}/'.format(self.dest_path, app[\"cpu_name\"], app[\"crate_id\"], app[\"slot_number\"])\n app_prefix = 'MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"])\n if (self.verbose):\n print(\"Application path : {}\".format(app_path))\n print(\"Application prefix : {}\".format(app_prefix))\n\n self.__write_mps_db(path=app_path, macros={\"P\":app_prefix, \"THR_LOADED\":\"0\"})\n self.__write_app_id_config(path=app_path, macros={\"ID\":str(app[\"app_id\"])})\n self.__write_thresholds_off_config(path=app_path)\n\n # Add the IOC name environmental variable for the Link Nodes\n if app[\"analog_link_node\"]:\n self.__write_header_env(path=app_path, macros={\"MPS_LINK_NODE\":app[\"link_node_name\"],\n \"MPS_DB_VERSION\":self.config_version,\n \"DATE\":datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S')})\n\n self.__write_iocinfo_env(path=app_path, macros={\"AREA\":app[\"link_node_area\"].upper(),\n \"LOCATION\":app[\"link_node_location\"].upper(),\n \"LOC_IDX\":app['link_node_location'].upper().replace('MP', ''),\n \"C_IDX\":unicode(app['card_index'])})\n self.__write_prefix_env(path=app_path, macros={\"P\":app_prefix})\n\n spare_channels = range(0,6)\n for device in app[\"devices\"]:\n device_prefix = \"{}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"])\n\n if (self.verbose):\n print(\" Device prefix : {}\".format(device_prefix))\n\n if (device[\"type_name\"] not in self.non_link_node_types):\n macros = { \"P\": app_prefix,\n \"CH\":str(device[\"channel_index\"]),\n \"CH_NAME\":device[\"device_name\"],\n \"CH_PVNAME\":device_prefix,\n \"CH_SPARE\":\"0\"\n }\n self.__write_link_node_channel_info_db(path=app_path, macros=macros)\n processing = 0\n ch = device['channel_index']\n if (device[\"type_name\"] == \"CBLM\"):\n processing = 1\n if (device[\"type_name\"] == \"KICK\"):\n processing = 1\n int0 = device['channel_index']*4\n int1 = device['channel_index']*4 + 1\n macros = { \"CH\":format(device['channel_index']),\n \"PROC\":format(processing),\n \"INT0\":format(int0),\n \"INT1\":format(int1)\n }\n self.__write_ana_config(path=app_path, macros=macros)\n spare_channels[device[\"channel_index\"]] = -1\n for fault in device[\"faults\"].values():\n bsa_slot = fault['integrators'][0]*6 + device[\"channel_index\"]\n macros = { \"P\":app_prefix,\n \"R\":'ANA_BSA_DATA_{}'.format(bsa_slot),\n \"P_DEV\":device_prefix,\n \"R_DEV\":self.get_analog_type_name(device[\"type_name\"]),\n \"FAULT\":fault['name'],\n \"EGU\":self.get_app_units(device[\"type_name\"],fault[\"name\"])\n }\n self.__write_analog_db(path=app_path, macros=macros)\n macros = { \"P\":device_prefix,\n \"BAY\":format(device[\"bay_number\"]),\n \"APP\":self.get_app_type_name(device[\"type_name\"]),\n \"FAULT\":fault[\"name\"],\n \"FAULT_INDEX\":self.get_fault_index(device[\"type_name\"], fault[\"name\"], device[\"channel_number\"]),\n \"DESC\":fault[\"description\"],\n \"EGU\":self.get_app_units(device[\"type_name\"],fault[\"name\"]),\n \"SLOPE\":unicode(self.get_slope(device[\"type_name\"])),\n \"OFFSET\":unicode(self.get_offset(device[\"type_name\"]))}\n self.__write_thr_base_db(path=app_path, macros=macros)\n # Generate PV for all possible thresholds, even if not defined in database\n for bit in range(0,8):#fault[\"bit_positions\"]:\n fault_prefix = \"{}_T{}\".format(fault[\"name\"], bit)\n macros[\"BIT_POSITION\"] = str(bit)\n 
self.__write_thr_db(path=app_path, macros=macros)\n if (self.verbose):\n print(\" Fault prefix : {}\".format(fault_prefix))\n\n\n for ch in spare_channels:\n if ch > -1:\n macros = { \"P\": app_prefix,\n \"CH\":str(ch),\n \"CH_NAME\":\"Spare\",\n \"CH_PVNAME\":\"None\",\n \"CH_SPARE\":\"1\"\n }\n self.__write_link_node_channel_info_db(path=app_path, macros=macros)\n\n #\n # Write db information about slots of each link node\n #\n for app in self.analog_apps + self.digital_apps:\n app_path = '{}app_db/{}/{:04}/{:02}/'.format(self.dest_path, app[\"cpu_name\"], app[\"crate_id\"], app[\"slot_number\"])\n link_node_info=self.link_nodes[app[\"link_node_name\"]]\n #print link_node_info\n if not 'exported' in link_node_info:\n for slot in range(2,8):\n if slot in link_node_info['slots']:\n macros = { \"P\": app[\"app_prefix\"],\n \"SLOT\": str(slot),\n \"SLOT_NAME\": link_node_info['slots'][slot]['type'],\n \"SLOT_PVNAME\": link_node_info['slots'][slot]['pv_base'],\n \"SLOT_SPARE\": \"0\"}\n else:\n macros = { \"P\": app[\"app_prefix\"],\n \"SLOT\": str(slot),\n \"SLOT_NAME\": \"Spare\",\n \"SLOT_PVNAME\": \"Spare\",\n \"SLOT_SPARE\": \"1\"}\n\n self.__write_link_node_slot_info_db(path=app_path, macros=macros)\n\n # Add CH_* PVs for digital-only link nodes. These are added before \n # only if the LN is Mixed or Analog\n if link_node_info['type'] == 'Digital':\n for ch in spare_channels:\n macros = { \"P\": app[\"app_prefix\"],\n \"CH\":str(ch),\n \"CH_NAME\":\"Not Available\",\n \"CH_PVNAME\":\"None\",\n \"CH_SPARE\":\"1\"\n }\n self.__write_link_node_channel_info_db(path=app_path, macros=macros)\n\n link_node_info['exported']=True\n\n #\n # Add Link Node related information\n #\n #for ln_name,ln in self.link_nodes.items():\n # if \"lc1_node_id\" not in ln:\n # continue\n # if \"dig_app_id\" not in ln:\n # continue\n # print ln[\"lc1_node_id\"] + ' ' + ln[\"type\"] + ' ' + ln[\"dig_app_id\"]\n for ln_name,ln in self.link_nodes.items():\n self.__write_lc1_info_config(ln)\n self.__write_link_node_info_db(ln_name, ln)\n\n if (self.verbose):\n print(\"--------------------------\")",
"def export_model_description(md: ModelDescription) -> bytes:\n\n # ---------------- write model description -------------------\n\n fmd = ET.Element(\"fmiModelDescription\")\n fmd.set(\"fmiVersion\", \"2.0\")\n fmd.set(\"modelName\", md.modelName)\n fmd.set(\"guid\", md.guid)\n fmd.set(\"author\", md.author)\n fmd.set(\"generationDateAndTime\", md.generationDateAndTime)\n fmd.set(\"variableNamingConvention\", md.variableNamingConvention)\n fmd.set(\"generationTool\", md.generationTool)\n fmd.set(\"description\", md.description)\n\n # CoSimulation\n cs = ET.SubElement(fmd, \"CoSimulation\")\n cs.set(\"modelIdentifier\", md.CoSimulation.modelIdentifier)\n cs.set(\n \"needsExecutionTool\", str(md.CoSimulation.needsExecutionTool).lower(),\n )\n cs.set(\n \"canHandleVariableCommunicationStepSize\",\n str(md.CoSimulation.canHandleVariableCommunicationStepSize).lower(),\n )\n cs.set(\n \"canInterpolateInputs\", str(md.CoSimulation.canInterpolateInputs).lower(),\n )\n\n cs.set(\n \"maxOutputDerivativeOrder\", str(md.CoSimulation.maxOutputDerivativeOrder),\n )\n cs.set(\n \"canRunAsynchronuously\", str(md.CoSimulation.canRunAsynchronuously).lower(),\n )\n cs.set(\n \"canBeInstantiatedOnlyOncePerProcess\",\n str(md.CoSimulation.canBeInstantiatedOnlyOncePerProcess).lower(),\n )\n cs.set(\n \"canNotUseMemoryManagementFunctions\",\n str(md.CoSimulation.canNotUseMemoryManagementFunctions).lower(),\n )\n cs.set(\n \"canGetAndSetFMUstate\", str(md.CoSimulation.canGetAndSetFMUstate).lower(),\n )\n cs.set(\n \"canSerializeFMUstate\", str(md.CoSimulation.canSerializeFMUstate).lower(),\n )\n cs.set(\n \"providesDirectionalDerivative\",\n str(md.CoSimulation.providesDirectionalDerivative).lower(),\n )\n\n # 2.2.4 p.42) Log categories:\n cs = ET.SubElement(fmd, \"LogCategories\")\n for ac in md.logCategories:\n c = ET.SubElement(cs, \"Category\")\n c.set(\"name\", ac)\n\n # 2.2.7 p.47) ModelVariables\n mvs = ET.SubElement(fmd, \"ModelVariables\")\n\n variable_index = 0\n\n for var in md.modelVariables:\n var.variability\n value_reference = str(var.value_reference)\n\n idx_comment = ET.Comment(f'Index of variable = \"{variable_index + 1}\"')\n mvs.append(idx_comment)\n sv = ET.SubElement(mvs, \"ScalarVariable\")\n sv.set(\"name\", var.name)\n sv.set(\"valueReference\", value_reference)\n sv.set(\"variability\", var.variability)\n sv.set(\"causality\", var.causality)\n\n if var.description:\n sv.set(\"description\", var.description)\n\n if var.initial:\n i = var.initial\n sv.set(\"initial\", i)\n\n val = ET.SubElement(sv, var.dataType)\n\n # 2.2.7. p.48) start values\n if var.initial in {\"exact\", \"approx\"} or var.causality == \"input\":\n assert (\n var.start != None\n ), \"a start value must be defined for intial ∈ {exact, approx}\"\n val.set(\"start\", var.start)\n\n variable_index += 1\n\n ms = ET.SubElement(fmd, \"ModelStructure\")\n\n # 2.2.8) For each output we must declare 'Outputs' and 'InitialUnknowns'\n outputs = [\n (idx + 1, o)\n for idx, o in enumerate(md.modelVariables)\n if o.causality == \"output\"\n ]\n\n if outputs:\n os = ET.SubElement(ms, \"Outputs\")\n for idx, o in outputs:\n ET.SubElement(os, \"Unknown\", {\"index\": str(idx), \"dependencies\": \"\"})\n\n os = ET.SubElement(ms, \"InitialUnknowns\")\n for idx, o in outputs:\n ET.SubElement(os, \"Unknown\", {\"index\": str(idx), \"dependencies\": \"\"})\n\n # FMI requires encoding to be encoded as UTF-8 and contain a header:\n #\n # See 2.2 p.28\n return ET.tostring(fmd, pretty_print=True, encoding=\"utf-8\", xml_declaration=True)",
"def generate_file(material_id):\n apr=get_doc_from_MP(material_id)\n mat_list=generate_matrix(apr)\n formu=POSCAR_title(apr)\n cell_for=generate_cell_formula(apr)\n needed_dos=generate_dos_str(material_id)\n revise_dos=dos_into_string(needed_dos)\n ordered_list=generate_ordered_list(revise_dos)\n my_ordered_elements=generate_ordered_elements(revise_dos,ordered_list)\n my_ordered_numbers=generate_ordered_numbers(revise_dos,ordered_list,cell_for)\n generate_POSCAR(formu,mat_list,my_ordered_elements,my_ordered_numbers,revise_dos)",
"def makeModelFiles(self,niter=20,targetrms=1.0,nlayers=100,nlperdec=30,\n z1layer=50,bwidth=200,trigger=.75,savepath=None,rhostart=100,\n occampath=r\"c:\\Peacock\\PHD\\OCCAM\\MakeFiles\"):\n #get the base name of data file \n dfnb=os.path.basename(self.datafn)\n \n #put data file into the same directory as MakeModel2DMT\n if os.path.dirname(self.datafn)!=occampath:\n shutil.copy(self.datafn,os.path.join(occampath,dfnb))\n \n #write input file for MakeModel2DMT\n mmfid=open(os.path.join(occampath,'inputMakeModel.txt'),'w')\n mmfid.write(dfnb+'\\n')\n mmfid.write(str(niter)+'\\n') \n mmfid.write(str(targetrms)+'\\n') \n mmfid.write(str(nlayers)+'\\n')\n mmfid.write(str(nlperdec)+'\\n')\n mmfid.write(str(z1layer)+'\\n')\n mmfid.write(str(bwidth)+'\\n')\n mmfid.write(str(trigger)+'\\n')\n mmfid.write('\\n')\n mmfid.close()\n \n #get current working directory\n cdir=os.getcwd() \n \n #change directory path to occam path\n os.chdir(occampath) \n \n #---call MakeModel2DMT---\n subprocess.os.system(\"MakeModel2DMT < inputMakeModel.txt\")\n \n #change back to original working directory \n os.chdir(cdir)\n \n if savepath==None:\n savepath=os.path.dirname(self.datafn)\n \n if not os.path.exists(savepath):\n os.mkdir(savepath)\n \n meshfn=os.path.join(savepath,'MESH') \n inmodelfn=os.path.join(savepath,'INMODEL') \n startupfn=os.path.join(savepath,'startup') \n \n #copy ouput files to savepath\n shutil.copy(os.path.join(occampath,'MESH'),meshfn)\n shutil.copy(os.path.join(occampath,'INMODEL'),inmodelfn)\n shutil.copy(os.path.join(occampath,'startup'),startupfn)\n shutil.copy(os.path.join(occampath,'inputMakeModel.txt'),\n os.path.join(savepath,'inputMakeModel.txt'))\n if not os.path.exists(os.path.join(savepath,dfnb)):\n shutil.copy(self.datafn,os.path.join(savepath,dfnb))\n if os.path.getctime(os.path.join(savepath,dfnb))<\\\n os.path.getctime(self.datafn):\n shutil.copy(self.datafn,os.path.join(savepath,dfnb))\n \n \n# #rewrite mesh so it contains the right number of columns and rows\n# rewriteMesh(meshfn)\n \n #write startup file to have the starting desired starting rho value\n ifid=open(startupfn,'r')\n ilines=ifid.readlines()\n ifid.close()\n \n if rhostart!=100:\n #make startup model a homogeneous half space of rhostart\n rhostart=np.log10(rhostart)\n ifid=open(startupfn,'w')\n for line in ilines:\n if line.find('2.000000')>=0:\n line=line.replace('2.000000','%.6f' % rhostart)\n ifid.write(line)\n ifid.close()\n \n print('Be sure to check the INMODEL file for clumped numbers near the bottom.')\n print('Also, check the MESH and startup files to make sure they are correct.')\n \n self.meshfn=meshfn\n self.inmodelfn=inmodelfn\n self.startupfn=startupfn",
"def create_dnz_file(args):\n\n file = open(args.o, 'w')\n\n file.write(\"% ----DATA VARIABLES----\\n\\n\")\n file.write(\"t=\" + str(args.t) + \";\" + \"%number of attributes\\n\")\n file.write(\"k=\" + str(args.k) + \";\" + \"%max length of the support set\\n\")\n file.write(\"n=\" + str(args.n) + \";\" + \"%number of positive instances\\n\")\n file.write(\"m=\" + str(args.m) + \";\" + \"%number of negative instances\\n\")\n file.write(\"c=\" + str(args.c) + \";\" + \"%number of atMostOne Constraints\\n\\n\")\n\n file.write(\"% ----OMEGAS----\\n\\n\")\n\n omega_p = generate_omega_data(args.t, args.n, args.b)\n file.write(\"omegap= \" + omega_to_mz(omega_p) + \"\\n\\n\")\n\n omega_n = generate_disjoint_omega_data(omega_p, args.m, args.b)\n file.write(\"omegan= \" + omega_to_mz(omega_n) + \"\\n\\n\")\n\n file.write(\"% ----CONSTRAINS----\\n\\n\")\n at_most_one = generate_at_most_one(int(args.t/2), args.c, 1, args.t)\n file.write(\"atMostOne=\" + at_most_one_to_mz(at_most_one))",
"def test_dac_tdms(self):\n\n # create TDMS model from one file\n tdms_parser = dac_tdms.parser()\n arguments = tdms_parser.parse_args(SLYCAT_CONNECTION + TDMS_FILE +\n TEST_MARKING + TEST_PROJECT)\n dac_tdms.create_model(arguments, dac_tdms.log)",
"def mk_dos_folder(chem_form, MP_ID) -> str:\n os.chdir('../')\n if os.path.isdir(chem_form + '_' + MP_ID):\n\n # os.chdir(chem_form)\n os.chdir(chem_form + '_' + MP_ID)\n os.chdir('nscf')\n tmpsrc=os.getcwd()+\"/tmp\"\n\n res_dict = pw_parser()\n\n if res_dict['status'] == \"DONE\":\n os.chdir('../')\n os.mkdir('dos')\n os.chdir('dos')\n tmpdes=os.getcwd()+\"/tmp\"\n shutil.copytree(tmpsrc,tmpdes)\n else:\n raise FileNotFoundError(\"Folder is not created.\")\n\n return res_dict",
"def write_feats(self):\n\n self.mk_outputdir()\n self.selfupdate()\n\n with open(os.path.join(self.outputdir, \"{}.mat\".format(self.basename)), \"w\") as f:\n f.write(self.mat)\n\n with open(os.path.join(self.outputdir, \"{}.con\".format(self.basename)), \"w\") as f:\n f.write(self.con)\n\n with open(os.path.join(self.outputdir, \"{}.grp\".format(self.basename)), \"w\") as f:\n f.write(self.grp)",
"def save2file(self, filename): \r\n today = datetime.date.today()\r\n \r\n artistlist=sorted(self.CDlib)\r\n write_file = open(filename, 'w')\r\n write_file.write('='*50+'\\n')\r\n write_file.write('==CD collection of %s saved %s ==\\n' %(self.owner,str(today)))\r\n write_file.write('='*50+'\\n') \r\n for artist in artistlist:\r\n for cd in self.CDlib[artist]:\r\n write_file.write(artist)\r\n write_file.write(' - ')\r\n write_file.write(cd)\r\n write_file.write('\\n')\r\n write_file.close()",
"def write_scram_toolfiles(self):\n from string import Template\n\n mkdirp(join_path(self.spec.prefix.etc, 'scram.d'))\n\n values = {}\n values['VER'] = self.spec.version\n values['PFX'] = self.spec.prefix\n\n fname = 'uuid-cms.xml'\n template = Template(\"\"\"<tool name=\"uuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)\n\n fname = 'libuuid.xml'\n template = Template(\"\"\"<tool name=\"libuuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
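Illustration only: the per-band loop in the document above just maps (run, tileid, type, band) to one MEDS output filename per band. The stand-in below is not the real files.get_meds_file; the path template, run name, and tile id are invented.

def get_meds_file(run, tileid, dtype, band):
    # Hypothetical path layout, one file per (type, band).
    return "{run}/{dtype}/{tile}_{dtype}_{band}_meds.fits".format(
        run=run, dtype=dtype, tile=tileid, band=band)

for band in ['u', 'g', 'r', 'i', 'z']:
    print(get_meds_file("run01", 1234, "des", band))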
write the cutouts for the specified type | def _write_psf_cutouts_hst(self):
print('writing psf cutouts')
obj_data=self.obj_data
psf_data=self.psf_data
nfile=self.image_info.size
nobj=obj_data.size
cutout_hdu = self.fits['psf']
for iobj in range(nobj):
if (iobj+1) % 100 == 0:
print(' %d/%d' % (iobj+1,obj_data.size))
# HST psf is same for every cutout, in fact ncut should always
# be 1
try:
psf_im = self.psf_data.get_psf(iobj)
except AttributeError:
psf_im = None
ncut=obj_data['ncutout'][iobj]
for icut in range(ncut):
if psf_im is None:
row = obj_data['orig_row'][iobj, icut]
col = obj_data['orig_col'][iobj, icut]
file_id = obj_data['file_id'][iobj,icut]
p = self.psf_data[file_id]
psf_im = p.get_rec(row,col)
expected_psf_shape = (
obj_data['psf_row_size'][iobj,icut],
obj_data['psf_col_size'][iobj,icut],
)
file_id = obj_data['file_id'][iobj, icut]
row = obj_data['orig_row'][iobj, icut]
col = obj_data['orig_col'][iobj, icut]
start_row = obj_data['psf_start_row'][iobj, icut]
if psf_im.shape != expected_psf_shape:
raise ValueError("psf size mismatch, expected %s "
"got %s" % (expected_psf_shape, psf_im.shape))
cutout_hdu.write(psf_im, start=start_row) | [
"def _write_cutout(self,\n iobj,\n icut,\n cutout_hdu,\n im_data,\n cutout_type):\n\n if cutout_type=='psf':\n start_row = self.obj_data['psf_start_row'][iobj,icut]\n else:\n start_row = self.obj_data['start_row'][iobj,icut]\n\n cutout_hdu.write(im_data, start=start_row)",
"def _write_object_cutouts(self, iobj):\n\n obj_data=self.obj_data\n nobj=obj_data.size\n assert iobj < nobj\n\n image_data = self.producer.getStamps(obj_data[iobj])\n\n ncut = len(image_data)\n nexp = obj_data['ncutout'][iobj]\n if ncut != nexp:\n raise ValueError(\"expected %d cutouts, got %d\" % (nexp,ncut))\n\n\n # fill in obj_data for the stamps\n self._fill_obj_data(iobj, image_data)\n\n box_size = obj_data['box_size'][iobj]\n # write image data\n for cutout_type in self['cutout_types'] + ['psf']:\n #print(' %d: writing %s cutouts' % (iobj,cutout_type))\n\n cutout_hdu = self._get_cutout_hdu(cutout_type)\n\n for icut, idata in enumerate(image_data):\n stamp = idata['stamp']\n seg_map = idata['seg_map']\n\n if stamp is None:\n print(\" stamp\",icut,\"is None\")\n continue\n\n if idata['image_id'] not in self._idlist:\n self._append_image_info(idata)\n\n obj_data['file_id'][iobj,icut] = \\\n self._idlist.index(idata['image_id'])\n\n if cutout_type == 'seg':\n if seg_map is None and icut != 0:\n assert self['fake_se_seg']\n # grab the image and make a fake\n # seg map like that\n\n im_data = numpy.zeros( [box_size]*2, dtype='i4')\n else:\n im_data = numpy.array(seg_map.array, dtype='i4', copy=False)\n\n elif cutout_type=='psf':\n # psfs are variable in size\n im_data = self._extract_psf_image(stamp, idata['image_pos'])\n\n obj_data['psf_box_size'][iobj,icut] = im_data.shape[0]\n obj_data['psf_start_row'][iobj,icut] = self.current_psf_position\n\n # increment for the next write\n self.current_psf_position += im_data.size\n\n else:\n im_data = self._extract_image(\n stamp,\n cutout_type,\n box_size,\n )\n\n self._write_cutout(\n iobj,\n icut,\n cutout_hdu,\n im_data,\n cutout_type,\n )",
"def _write_moleculetype(top_file: IO, mol_name: str, nrexcl: int = 3):\n top_file.write(\"[ moleculetype ]\\n\")\n top_file.write(\"; Name\\tnrexcl\\n\")\n top_file.write(f\"{mol_name}\\t{nrexcl}\\n\\n\")",
"def objcutouts(objid,size=40,outdir='./'):\n \n obj = qc.query(sql=\"select * from nsc_dr2.object where id='%s'\" % objid,fmt='table',profile='db01')\n meas = qc.query(sql=\"select * from nsc_dr2.meas where objectid='%s'\" % objid,fmt='table',profile='db01')\n nmeas = len(meas)\n print(str(nmeas)+' measurements for '+objid)\n meascutout(meas,obj,size=size,outdir=outdir)",
"def dispatchWriter(self, commandType, command):\n print command\n\n self.outfile.write('// **** '+string.join(command)+' **** //\\n')\n\n if (commandType == 'C_ARITHMETIC'):\n self.writeArithmetic(command[0])\n elif (commandType == 'C_POP' or commandType == 'C_PUSH'):\n self.writePushPop(command[0], command[1], command[2])\n elif (commandType == 'C_GOTO'):\n self.writeGoto(command[1])\n elif (commandType == 'C_IF'):\n self.writeIf(command[1])\n elif (commandType == 'C_FUNCTION'):\n self.writeFunction(command[1], command[2])\n elif (commandType == 'C_RETURN'):\n self.writeReturn()\n elif (commandType == 'C_CALL'):\n self.writeCall(command[1], command[2])\n elif (commandType == 'C_LABEL'):\n self.writeLabel(command[1])\n else:\n # Should probably throw some error here, but \n # its an academic exercise, not for production.\n pass",
"def write_output(self):",
"def _write(self, *args, **kwargs):\n raise NotImplementedError('Writing OUTCAR files is not supported.')",
"def writeOutput(self, output):",
"def pltoutput(simname, varname, outtype):\n # output file name for hardcopy\n ofname = os.getcwd()+\"/img/\"+simname+\"_\"+varname\n\n if (outtype == \"pdf\"):\n plt.savefig(ofname+\".pdf\")\n\n elif (outtype == \"png\"):\n plt.savefig(ofname+\".png\")\n\n else:\n plt.show()\n\n return",
"def write_distr(all_types,atom_types,charges,atom_volumes):\n type_charges = []\n type_volumes = []\n temp_charges = []\n temp_volumes = []\n for i in range(len(all_types)):\n temp_charges.append(all_types[i]+' ')\n temp_volumes.append(all_types[i]+' ')\n for j in range(len(charges)):\n if atom_types[j] == all_types[i]:\n temp_charges.append(str(charges[j])+' ')\n temp_volumes.append(str(atom_volumes[j])+' ')\n temp_charges.append('\\n')\n temp_volumes.append('\\n')\n type_charges.append(temp_charges)\n type_volumes.append(temp_volumes)\n temp_charges = []\n temp_volumes = []\n charge_file = open('charges.out','w')\n volume_file = open('volumes.out','w')\n for k in range(len(type_charges)):\n charge_file.writelines(type_charges[k])\n volume_file.writelines(type_volumes[k])\n return",
"def write(self):\n if self.tree_list:\n if self.output_format == \"newick\":\n self.trees.write(path=self.output, schema=self.output_format, suppress_rooting=True)\n else:\n self.trees.write(path=self.output, schema=self.output_format)\n else:\n if self.output_format == \"newick\":\n self.tree.write(path=self.output, schema=self.output_format, suppress_rooting=True)\n else:\n self.tree.write(path=self.output, schema=self.output_format)",
"def write_report(report, ftype):\n if ftype == 'text':\n msg = '{} disks have been removed\\n'.format(len(report))\n msg += 'To replace them, run:\\n'\n for device, action_args in report.items():\n args = json.dumps(action_args, separators=(' ', '='))\n args = args.replace('{', '').replace('}', '').replace('\"', '')\n msg += 'juju run-action {} add-disk {} {}'.format(\n hookenv.local_unit(), 'osd-devices=' + device, args)\n else:\n msg = json.dumps(report)\n\n hookenv.action_set({'message': msg})",
"def write_mfcc_data_to_file(self, work_type):\n f = open(path_to_mfcc + \"base/\" + work_type + \"_base.txt\", \"w\")\n f.write(str(len(self.mfcc[work_type])) + \"\\n\")\n for i in self.mfcc[work_type].keys():\n f.write(str(i).lower() + \"\\n\")\n f.write(str(len(self.mfcc[work_type][i])) + \"\\n\")\n for j in self.mfcc[work_type][i]:\n f.write(j + \"\\n\")\n f.close()",
"def write_cut_metadata(ofile, event_cuts, dom_cuts):\n\n infile = tables.open_file(ofile, 'a')\n\n infile.root._v_attrs.event_cuts = event_cuts\n infile.root._v_attrs.dom_cuts = dom_cuts\n\n infile.close()",
"def write(self, info, measurements):\n raise NotImplementedError",
"def dump_cuts_list(self, file_name):\n assert(file_name is not None)\n with open(file_name, 'w') as fd:\n uids = self._cuts.keys()\n uids.sort()\n for cut_uid in uids:\n cut = self._cuts[cut_uid]\n fd.write(cut.get_cost_var() + \" \" + str(cut.get_cost()) + \"\\n\")\n return",
"def write_subset(self, out):\n with open(out, 'w') as f:\n for pair in self.pairs:\n if pair.subset_label is not None:\n f.write(\"{} {}\\n\".format(pair.image_path, pair.subset_label))",
"def writeClumptoDump(self,ID):\n clumpxyz = self.clumpcat[ID][:3]\n r2 = (self.disc.xyzh[0]-clumpxyz[0])**2 + (self.disc.xyzh[1]-clumpxyz[1])**2\n members = np.sqrt(r2) < self.annulus #members are all particles within radial annulus\n\n gas = self.disc.itype == 1\n dust = self.disc.itype == 2\n\n dustfrac = self.disc.dustfrac*1e8 #as I originally set dust-to-gas=1e-10\n\n #Calculate temperatures from thermal energies\n k = 1.38064852e-16 #ergs\n mH = 1.6735575e-24 #grams\n gmw = 2.381 #mean mass taken from Phantom\n N = sum(gas*self.disc.massofgas)*self.umass/mH/gmw #number of atoms\n temp = 2.*self.disc.utherm*self.uenerg/3./k/N\n\n\t\tutime = self.utime/(60*60*24*365.25)\n\n\t\t#create arrays of particle masses\n\t\tmass = np.zeros(len(self.disc.xyzh[0,:]))\n\t\tmass[self.disc.itype == 1] = self.disc.massofgas\n\t\tmass[self.disc.itype == 2] = self.disc.massofdust\n\n\t\tclumpdata = zip(self.disc.xyzh[0,members], self.disc.xyzh[1,members], self.disc.xyzh[2,members], \n self.disc.xyzh[3,members], self.disc.density[members], mass[members], \n temp[members], dustfrac[members], self.disc.itype[members])\n\t\tclumpdata = np.asarray(clumpdata)\n\t\theader = (\"time: %s utime (yrs^-1): %s \\n x, y, z, h, density, mass, temp, \" %(str(self.disc.time), str(utime)) +\n\t\t\t \"dustfrac, itype \\n %s, %s, %s, %s, %s, %s, 0.0, 0.0, 0.0 \\n \\n\" %(str(self.udist), str(self.udist),\n\t\t\t \t\t\t\t\t\t\t\t str(self.udist), str(self.udist),\n\t\t\t\t\t\t\t\t\t\t\t str(self.udens), str(self.umass)))\n\t\tnp.savetxt('%s/clumpfiles/clumpdata_%.0f.txt' %(self.wd,self.disc.time), clumpdata, header=header)",
"def write_pc_cards(bc_file, bc_class):\n bc_file.write('! Output Control\\n')\n oc = bc_class.output_control\n objects = list(oc.param.output_control_option.get_range())\n if oc.output_control_option == objects[0]:\n bc_file.write('OC {}\\n'.format(oc.oc_time_series_id))\n ofs = oc.output_flow_strings\n if not ofs.empty:\n bc_file.write(ofs.to_csv(sep=' ', na_rep='', index=False, header=False,).replace('\\r\\n', '\\n'))\n\n if oc.print_adaptive_mesh:\n bc_file.write('PC ADP\\n')\n if oc.print_numerical_fish_surrogate:\n bc_file.write('PC ELM\\n')\n if oc.screen_output_residual:\n bc_file.write('SOUT RESID\\n')\n if oc.screen_output_all:\n bc_file.write('SOUT ALL\\n')\n if oc.screen_output_mass_error:\n bc_file.write('SOUT MERROR\\n')\n if oc.screen_output_worst_nonlinear_node:\n bc_file.write('SOUT NLNODE\\n')\n if oc.screen_output_worst_linear_node:\n bc_file.write('SOUT LNODE\\n')\n if oc.file_output_wind:\n bc_file.write('FOUT WIND\\n')\n if oc.file_output_wave:\n bc_file.write('FOUT WAVE\\n')\n if oc.file_output_adapted_grid:\n bc_file.write('FOUT ADAPT GRID\\n')\n if oc.file_output_adapted_solution:\n bc_file.write('FOUT ADAPT SW\\n')\n if oc.file_output_adapted_transport:\n bc_file.write('FOUT ADAPT CON\\n')\n if oc.file_output_sediment:\n bc_file.write('FOUT SED\\n')\n\n bc_file.write('\\n') # blank line after Output Control"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set the box sizes and start row for each psf image | def _set_psf_layout_hst(self):
    print('setting psf layout for HST')
    obj_data=self.obj_data

    total_psf_pixels = 0
    psf_start_row = 0

    for iobj in range(obj_data.size):
        if (iobj+1) % 100 == 0:
            print(' %d/%d' % (iobj+1,obj_data.size))

        # note assuming same psf for all "epochs"
        psf_im = self.psf_data.get_psf(iobj)

        psf_shape = psf_im.shape
        psf_npix = psf_im.size

        cen = (np.array(psf_shape)-1.0)/2.0

        # we will expand the psfs
        for icut in range(obj_data['ncutout'][iobj]):
            obj_data['psf_row_size'][iobj,icut] = psf_shape[0]
            obj_data['psf_col_size'][iobj,icut] = psf_shape[1]
            obj_data['psf_cutout_row'][iobj,icut] = cen[0]
            obj_data['psf_cutout_col'][iobj,icut] = cen[1]
            obj_data['psf_start_row'][iobj,icut] = psf_start_row

            psf_start_row += psf_npix
            total_psf_pixels += psf_npix

    self.total_psf_pixels = total_psf_pixels | [
"def _set_psf_layout(self):\n\n obj_data=self.obj_data\n\n producer = self.producer\n\n cat = producer.getCatalog()\n stamps = producer.getStamps(cat[0])\n\n sdata = stamps[0]\n psfobj=sdata['stamp'].getPsf()\n psfim = psfobj.computeKernelImage(sdata['image_pos']).array\n\n # some padding\n psf_size = max(psfim.shape)+2\n\n # now assume all are the same size for reserving the\n # data on disk. Not all pixels will be used\n\n #obj_data['psf_box_size'] = psf_size\n total_psf_pixels = 0\n psf_npix = psf_size*psf_size\n\n #psf_start_row = 0\n for i in xrange(obj_data.size):\n for j in xrange(obj_data['ncutout'][i]):\n #obj_data['psf_start_row'][i,j] = psf_start_row\n\n #psf_start_row += psf_npix\n total_psf_pixels += psf_npix\n\n\n self.total_psf_pixels = total_psf_pixels",
"def _set_psf_layout_psfex(self):\n\n print('setting psf layout for PSFEx')\n\n obj_data=self.obj_data\n psf_data=self.psf_data\n\n total_psf_pixels = 0\n\n #psf_npix = psf_size*psf_size\n\n psf_start_row = 0\n for iobj in range(obj_data.size):\n for icut in range(obj_data['ncutout'][iobj]):\n\n row = obj_data['orig_row'][iobj, icut]\n col = obj_data['orig_col'][iobj, icut]\n file_id = obj_data['file_id'][iobj,icut]\n\n p = psf_data[file_id]\n\n pim = p.get_rec(row,col)\n cen = p.get_center(row,col)\n\n psf_shape = pim.shape\n psf_npix = pim.size\n\n obj_data['psf_row_size'][iobj,icut] = psf_shape[0]\n obj_data['psf_col_size'][iobj,icut] = psf_shape[1]\n obj_data['psf_cutout_row'][iobj,icut] = cen[0]\n obj_data['psf_cutout_col'][iobj,icut] = cen[1]\n obj_data['psf_start_row'][iobj,icut] = psf_start_row\n\n psf_start_row += psf_npix\n total_psf_pixels += psf_npix\n\n\n self.total_psf_pixels = total_psf_pixels",
"def setup_iboxes(self):\n for field in box_headers.keys():\n # check if iboxes exist\n flist = match_files(self.data_dir,'{0}*lighttravel_cat_*_*.npy'.format(box_headers[field]))\n if flist == []: \n self.create_iboxes(field)\n f = match_file(self.data_dir,'{0}*lighttravel_cat_0_*.npy'.format(box_headers[field])) \n self.check_assign_box_pms(f)\n print \"set up iboxes for field %s\" % field",
"def build_filler_images(self):",
"def set_box_size(self, num=8):\n self.__box_size = num\n self.img_data = None",
"def __init__(self, width, height, boxes, start_positions, flag_position):\n self.width = width\n self.height = height\n self.boxes = boxes\n self.start_positions = start_positions\n self.flag_position = flag_position",
"def create_iboxes(self,field):\n # load all of the integrated lighttravel boxes\n flist = match_files(self.data_dir,'{0}*lighttravel'.format(box_headers[field]))\n box_list = []; zi_list = []; zf_list = []; box_zMpc = 0;\n if len(flist)==0: raise RuntimeError('No file found for field {0}'.format(field))\n for f in flist:\n zi,zf,_,box_size,box_Mpc = nums_from_string(f)\n data_file = self.data_dir+f\n box_data = np.fromfile(data_file,dtype=np.float32)\n box_list.append(box_data.reshape((box_size,box_size,box_size)))\n zi_list.append(zi)\n zf_list.append(zf)\n box_zMpc += box_Mpc\n # define parameters for concatenated box\n boxpms = {}\n boxpms['zMpc'] = int(box_zMpc)\n boxpms['xyMpc'] = int(box_Mpc)\n boxpms['zi'] = min(zi_list)\n boxpms['zf'] = max(zf_list)\n # create catbox\n sorted_box_list = [box for (zi,box) in sorted(zip(zi_list,box_list))]\n catbox = np.concatenate(sorted_box_list,axis=2)\n boxpms['shape']=catbox.shape\n # create and save separated iboxes \n ibox_list = np.array_split(catbox,self.itot,axis=2)\n catbox=None\n for ii,box in enumerate(ibox_list):\n np.save('{5}{0}zstart{1}_zend{2}_FLIPBOXES1_{3}_{4}Mpc_lighttravel_cat_{6}_{7}'.format(\n box_headers[field],boxpms['zi'],boxpms['zf'],boxpms['zMpc'],boxpms['xyMpc'],self.data_dir,ii,self.itot),box)",
"def _scale_boxes(self, boxes):\n height, width = self._dims\n image_dims = K.stack([height, width, height, width])\n image_dims = K.reshape(image_dims, [1, 4])\n boxes = boxes * image_dims\n return boxes",
"def __init__(self, boxsize):\n self._boxsize = size\n self.framen = -1",
"def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)",
"def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels",
"def set_ibox_shape(self):\n f = match_file(self.data_dir,'{0}*lighttravel_cat_0_{1}.npy'.format(box_headers['nf'],self.itot))\n if f==None:\n raise RuntimeError('No file found for field nf, ibox 0 when getting ibox shape')\n else: \n self.check_assign_box_pms(f)\n box = np.load(self.data_dir+f)\n self.pms['ishape'] = box.shape",
"def create_default_boxes():\n default_boxes_loc = []\n # cal coords of all default boxes of every feat layer\n for i, feat_shape in enumerate(Config.model.feat_shape):\n num_box = len(Config.model.anchor_sizes[i]) + len(Config.model.anchor_ratios[i])\n\n cy, cx = np.mgrid[0:feat_shape[0], 0:feat_shape[1]]\n # set center in each pix as centers, and relative position of image, range(0,1)\n cy = (cy + 0.5) * Config.model.anchor_steps[i] / Config.model.image_shape[0]\n cx = (cx + 0.5) * Config.model.anchor_steps[i] / Config.model.image_shape[1]\n # cy,cx --shape[H,W,1]\n cy = np.expand_dims(cy, -1).astype('float32')\n cx = np.expand_dims(cx, -1).astype('float32')\n w = np.zeros(num_box, dtype='float32')\n h = np.zeros(num_box, dtype='float32')\n # use anchor_sizes, anchor_ratios and original image size to get relative H,W , shape:[B,]\n h[0] = Config.model.anchor_sizes[i][0] / Config.model.image_shape[0]\n w[0] = Config.model.anchor_sizes[i][0] / Config.model.image_shape[1]\n h[1] = np.sqrt(Config.model.anchor_sizes[i][0] * Config.model.anchor_sizes[i][1]) / Config.model.image_shape[0]\n w[1] = np.sqrt(Config.model.anchor_sizes[i][0] * Config.model.anchor_sizes[i][1]) / Config.model.image_shape[1]\n for j, ratio in enumerate(Config.model.anchor_ratios[i]):\n h[j + 2] = h[0] / np.sqrt(ratio)\n w[j + 2] = w[0] * np.sqrt(ratio)\n default_boxes_loc.append([cy, cx, h, w])\n return default_boxes_loc",
"def set_boxes(self) :\n self.ui.rawBox.addItem(\".sef\")\n self.ui.rawBox.addItem(\".fif\")\n self.ui.mrkBox.addItem(\".mrk\")\n self.ui.mrkBox.addItem(\"-eve.fif\")",
"def _get_box_sizes(self, image_info, cat):\n\n\n file_id=0\n impath=image_info['image_path'][file_id].strip()\n ext=image_info['image_ext'][file_id]\n wcs_data = fitsio.read_header(impath, ext=ext)\n wcs = eu.wcsutil.WCS(wcs_data)\n\n\n jacob = wcs.get_jacobian(100,100)\n dudcol, dudrow, dvdcol, dvdrow = jacob\n\n det = dvdrow*dudcol - dvdcol*dudrow\n pixel_scale = np.sqrt(abs(det))\n print('found pixel scale:',pixel_scale)\n box_size = cat['box_size_arcsec']/pixel_scale\n\n # clip to range\n box_size.clip(\n min=self['min_box_size'],\n max=self['max_box_size'],\n out=box_size,\n )\n box_size = box_size.astype('i4')\n\n w,=np.where( ( (box_size % 2) != 0 ) )\n if w.size > 0:\n box_size[w] += 1\n\n return box_size",
"def setBoxsize(length,width,height):\n return length,width,height",
"def draw_boxs(img,boxs,width=3,color=(0,0,255)):\n box_img = copy.deepcopy(img)\n for i in range(boxs.shape[0]):\n # x1,y1,x2,y2=boxs[i]\n x1 = boxs[i][0]\n y1 = boxs[i][1]\n x2 = boxs[i][2]\n y2 = boxs[i][3]\n p1 = (int(round(x1)),int(round(y1)))\n p2 = (int(round(x2)),int(round(y2)))\n cv2.rectangle(box_img, p1, p2, color, width)\n\n return box_img",
"def _resize_bboxes(self, results):\n\n img_shape = results[\"img_shape\"]\n for key in results.get(\"bbox_fields\", []):\n bboxes = results[key] * results[\"scale_factor\"]\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes",
"def scale_boxes(boxes, image_shape):\n height = image_shape[0]\n width = image_shape[1]\n image_dims = np.stack([height, width, height, width])\n image_dims = np.reshape(image_dims, [1, 4])\n boxes = boxes * image_dims\n return boxes"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set the box sizes and start row for each psf image | def _set_psf_layout_psfex(self):
    print('setting psf layout for PSFEx')

    obj_data=self.obj_data
    psf_data=self.psf_data

    total_psf_pixels = 0

    #psf_npix = psf_size*psf_size

    psf_start_row = 0
    for iobj in range(obj_data.size):
        for icut in range(obj_data['ncutout'][iobj]):

            row = obj_data['orig_row'][iobj, icut]
            col = obj_data['orig_col'][iobj, icut]
            file_id = obj_data['file_id'][iobj,icut]

            p = psf_data[file_id]

            pim = p.get_rec(row,col)
            cen = p.get_center(row,col)

            psf_shape = pim.shape
            psf_npix = pim.size

            obj_data['psf_row_size'][iobj,icut] = psf_shape[0]
            obj_data['psf_col_size'][iobj,icut] = psf_shape[1]
            obj_data['psf_cutout_row'][iobj,icut] = cen[0]
            obj_data['psf_cutout_col'][iobj,icut] = cen[1]
            obj_data['psf_start_row'][iobj,icut] = psf_start_row

            psf_start_row += psf_npix
            total_psf_pixels += psf_npix

    self.total_psf_pixels = total_psf_pixels | [
"def _set_psf_layout(self):\n\n obj_data=self.obj_data\n\n producer = self.producer\n\n cat = producer.getCatalog()\n stamps = producer.getStamps(cat[0])\n\n sdata = stamps[0]\n psfobj=sdata['stamp'].getPsf()\n psfim = psfobj.computeKernelImage(sdata['image_pos']).array\n\n # some padding\n psf_size = max(psfim.shape)+2\n\n # now assume all are the same size for reserving the\n # data on disk. Not all pixels will be used\n\n #obj_data['psf_box_size'] = psf_size\n total_psf_pixels = 0\n psf_npix = psf_size*psf_size\n\n #psf_start_row = 0\n for i in xrange(obj_data.size):\n for j in xrange(obj_data['ncutout'][i]):\n #obj_data['psf_start_row'][i,j] = psf_start_row\n\n #psf_start_row += psf_npix\n total_psf_pixels += psf_npix\n\n\n self.total_psf_pixels = total_psf_pixels",
"def _set_psf_layout_hst(self):\n\n print('setting psf layout for HST')\n obj_data=self.obj_data\n\n total_psf_pixels = 0\n psf_start_row = 0\n\n for iobj in range(obj_data.size):\n if (iobj+1) % 100 == 0:\n print(' %d/%d' % (iobj+1,obj_data.size))\n\n # note assuming same psf for all \"epochs\"\n psf_im = self.psf_data.get_psf(iobj)\n\n psf_shape = psf_im.shape\n psf_npix = psf_im.size\n\n cen = (np.array(psf_shape)-1.0)/2.0\n\n # we will expand the psfs\n\n for icut in range(obj_data['ncutout'][iobj]):\n\n obj_data['psf_row_size'][iobj,icut] = psf_shape[0]\n obj_data['psf_col_size'][iobj,icut] = psf_shape[1]\n obj_data['psf_cutout_row'][iobj,icut] = cen[0]\n obj_data['psf_cutout_col'][iobj,icut] = cen[1]\n obj_data['psf_start_row'][iobj,icut] = psf_start_row\n\n psf_start_row += psf_npix\n total_psf_pixels += psf_npix\n\n self.total_psf_pixels = total_psf_pixels",
"def setup_iboxes(self):\n for field in box_headers.keys():\n # check if iboxes exist\n flist = match_files(self.data_dir,'{0}*lighttravel_cat_*_*.npy'.format(box_headers[field]))\n if flist == []: \n self.create_iboxes(field)\n f = match_file(self.data_dir,'{0}*lighttravel_cat_0_*.npy'.format(box_headers[field])) \n self.check_assign_box_pms(f)\n print \"set up iboxes for field %s\" % field",
"def build_filler_images(self):",
"def set_box_size(self, num=8):\n self.__box_size = num\n self.img_data = None",
"def __init__(self, width, height, boxes, start_positions, flag_position):\n self.width = width\n self.height = height\n self.boxes = boxes\n self.start_positions = start_positions\n self.flag_position = flag_position",
"def create_iboxes(self,field):\n # load all of the integrated lighttravel boxes\n flist = match_files(self.data_dir,'{0}*lighttravel'.format(box_headers[field]))\n box_list = []; zi_list = []; zf_list = []; box_zMpc = 0;\n if len(flist)==0: raise RuntimeError('No file found for field {0}'.format(field))\n for f in flist:\n zi,zf,_,box_size,box_Mpc = nums_from_string(f)\n data_file = self.data_dir+f\n box_data = np.fromfile(data_file,dtype=np.float32)\n box_list.append(box_data.reshape((box_size,box_size,box_size)))\n zi_list.append(zi)\n zf_list.append(zf)\n box_zMpc += box_Mpc\n # define parameters for concatenated box\n boxpms = {}\n boxpms['zMpc'] = int(box_zMpc)\n boxpms['xyMpc'] = int(box_Mpc)\n boxpms['zi'] = min(zi_list)\n boxpms['zf'] = max(zf_list)\n # create catbox\n sorted_box_list = [box for (zi,box) in sorted(zip(zi_list,box_list))]\n catbox = np.concatenate(sorted_box_list,axis=2)\n boxpms['shape']=catbox.shape\n # create and save separated iboxes \n ibox_list = np.array_split(catbox,self.itot,axis=2)\n catbox=None\n for ii,box in enumerate(ibox_list):\n np.save('{5}{0}zstart{1}_zend{2}_FLIPBOXES1_{3}_{4}Mpc_lighttravel_cat_{6}_{7}'.format(\n box_headers[field],boxpms['zi'],boxpms['zf'],boxpms['zMpc'],boxpms['xyMpc'],self.data_dir,ii,self.itot),box)",
"def _scale_boxes(self, boxes):\n height, width = self._dims\n image_dims = K.stack([height, width, height, width])\n image_dims = K.reshape(image_dims, [1, 4])\n boxes = boxes * image_dims\n return boxes",
"def __init__(self, boxsize):\n self._boxsize = size\n self.framen = -1",
"def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)",
"def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels",
"def set_ibox_shape(self):\n f = match_file(self.data_dir,'{0}*lighttravel_cat_0_{1}.npy'.format(box_headers['nf'],self.itot))\n if f==None:\n raise RuntimeError('No file found for field nf, ibox 0 when getting ibox shape')\n else: \n self.check_assign_box_pms(f)\n box = np.load(self.data_dir+f)\n self.pms['ishape'] = box.shape",
"def create_default_boxes():\n default_boxes_loc = []\n # cal coords of all default boxes of every feat layer\n for i, feat_shape in enumerate(Config.model.feat_shape):\n num_box = len(Config.model.anchor_sizes[i]) + len(Config.model.anchor_ratios[i])\n\n cy, cx = np.mgrid[0:feat_shape[0], 0:feat_shape[1]]\n # set center in each pix as centers, and relative position of image, range(0,1)\n cy = (cy + 0.5) * Config.model.anchor_steps[i] / Config.model.image_shape[0]\n cx = (cx + 0.5) * Config.model.anchor_steps[i] / Config.model.image_shape[1]\n # cy,cx --shape[H,W,1]\n cy = np.expand_dims(cy, -1).astype('float32')\n cx = np.expand_dims(cx, -1).astype('float32')\n w = np.zeros(num_box, dtype='float32')\n h = np.zeros(num_box, dtype='float32')\n # use anchor_sizes, anchor_ratios and original image size to get relative H,W , shape:[B,]\n h[0] = Config.model.anchor_sizes[i][0] / Config.model.image_shape[0]\n w[0] = Config.model.anchor_sizes[i][0] / Config.model.image_shape[1]\n h[1] = np.sqrt(Config.model.anchor_sizes[i][0] * Config.model.anchor_sizes[i][1]) / Config.model.image_shape[0]\n w[1] = np.sqrt(Config.model.anchor_sizes[i][0] * Config.model.anchor_sizes[i][1]) / Config.model.image_shape[1]\n for j, ratio in enumerate(Config.model.anchor_ratios[i]):\n h[j + 2] = h[0] / np.sqrt(ratio)\n w[j + 2] = w[0] * np.sqrt(ratio)\n default_boxes_loc.append([cy, cx, h, w])\n return default_boxes_loc",
"def set_boxes(self) :\n self.ui.rawBox.addItem(\".sef\")\n self.ui.rawBox.addItem(\".fif\")\n self.ui.mrkBox.addItem(\".mrk\")\n self.ui.mrkBox.addItem(\"-eve.fif\")",
"def _get_box_sizes(self, image_info, cat):\n\n\n file_id=0\n impath=image_info['image_path'][file_id].strip()\n ext=image_info['image_ext'][file_id]\n wcs_data = fitsio.read_header(impath, ext=ext)\n wcs = eu.wcsutil.WCS(wcs_data)\n\n\n jacob = wcs.get_jacobian(100,100)\n dudcol, dudrow, dvdcol, dvdrow = jacob\n\n det = dvdrow*dudcol - dvdcol*dudrow\n pixel_scale = np.sqrt(abs(det))\n print('found pixel scale:',pixel_scale)\n box_size = cat['box_size_arcsec']/pixel_scale\n\n # clip to range\n box_size.clip(\n min=self['min_box_size'],\n max=self['max_box_size'],\n out=box_size,\n )\n box_size = box_size.astype('i4')\n\n w,=np.where( ( (box_size % 2) != 0 ) )\n if w.size > 0:\n box_size[w] += 1\n\n return box_size",
"def setBoxsize(length,width,height):\n return length,width,height",
"def draw_boxs(img,boxs,width=3,color=(0,0,255)):\n box_img = copy.deepcopy(img)\n for i in range(boxs.shape[0]):\n # x1,y1,x2,y2=boxs[i]\n x1 = boxs[i][0]\n y1 = boxs[i][1]\n x2 = boxs[i][2]\n y2 = boxs[i][3]\n p1 = (int(round(x1)),int(round(y1)))\n p2 = (int(round(x2)),int(round(y2)))\n cv2.rectangle(box_img, p1, p2, color, width)\n\n return box_img",
"def _resize_bboxes(self, results):\n\n img_shape = results[\"img_shape\"]\n for key in results.get(\"bbox_fields\", []):\n bboxes = results[key] * results[\"scale_factor\"]\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes",
"def scale_boxes(boxes, image_shape):\n height = image_shape[0]\n width = image_shape[1]\n image_dims = np.stack([height, width, height, width])\n image_dims = np.reshape(image_dims, [1, 4])\n boxes = boxes * image_dims\n return boxes"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
read the cosmos catalog | def _read_catalog(self, catname):
    print('loading catalog:',catname)
    with fitsio.FITS(catname,lower=True) as fits:
        #cat = fits[1][100000:110000]
        if 'object_data' in fits:
            print('reading from MEDS object data')
            ext='object_data'
        else:
            ext=1

        cat = fits[ext][:]

    # one cut here based on if we matched to the galsim cat
    w, = np.where(
        #(cat['mu_class'] < 3)
        #&
        #(cat['mask']==0)
        #&
        (cat['gscosmos_index'] >= 0)
    )
    print('initial cuts %d/%d %g%%' % (w.size,cat.size,w.size/cat.size*100))
    cat = cat[w]

    return cat | [
"def read_catalog(catalog):\n with open(catalog, \"r\") as f:\n header = f.readline()\n if header.startswith('#EventID | Time | Latitude | Longitude | Depth/km'):\n catalog = _read_iris(f)\n elif header.startswith('time, latitude, longitude, depth, depthUnits, magnitude'):\n catalog = _read_sod(f)\n else:\n sys.exit(\"Unknown catalog format\")\n return catalog",
"def catalog():",
"def _load_catalog(self):\n self.catalog = pd.read_csv(self.catalog_path)\n nrow, ncol = self.catalog.shape\n logger.info(\"Loaded SNRs catalog data from: {0}\".format(\n self.catalog_path))\n logger.info(\"SNRs catalog data: {0} objects, {1} columns\".format(\n nrow, ncol))\n # Set the units for columns\n self.units = {\n \"glon\": au.deg,\n \"glat\": au.deg,\n \"size\": au.arcmin,\n \"flux\": au.Jy,\n }\n # The flux densities are given at 1 GHz\n self.catalog_flux_freq = 1.0 * au.GHz",
"def getCatalog(unique_name):",
"def get_catalog(cls):\n return Catalog.open(os.path.join(testpath, 'catalog/catalog.json'))",
"def read_catalog():\n categories = session.query(Category).all()\n items = session.query(CatalogItem).order_by(CatalogItem.id.desc())\n quantity = items.count()\n return categories, items, quantity",
"def readCAS(self):\n\t\tpath=os.path.join(self.mirrorLocation,'depository','species','catalog')\n\t\tlistOfFiles=os.listdir(path)\n\t\treCas=re.compile('CASRegistryNumber\">([0-9/-]+)</name>')\n\t\tfor filename in listOfFiles:\n\t\t\tfilePath=os.path.join(path,filename)\n\t\t\tif not os.path.isfile(filePath): continue\n\t\t\tdata=file(filePath,'r').read()\n\t\t\tmatch=reCas.search(data)\n\t\t\tprimeid=os.path.splitext(filename)[0]\n\t\t\tif match:\n\t\t\t\tcas=match.group(1)\n\t\t\t\tprint primeid,cas\n\t\t\t\t# each primed has a unique cas so we just store it\n\t\t\t\tself.primeid2cas[primeid]=cas\n\t\t\t\t# each cas may have more than one primeid so we store as a list and append\n\t\t\t\tif self.cas2primeids.has_key(cas):\n\t\t\t\t\tself.cas2primeids[cas].append(primeid)\n\t\t\t\t\tprint \"Warning! species %s all have same CAS %s\"%(self.cas2primeids[cas], cas)\n\t\t\t\telse:\n\t\t\t\t\tself.cas2primeids[cas]=[primeid]",
"def read_catalog(cls, cat, path, pub=None):\n\n catf = file(os.path.join(path, \"catalog\"))\n for line in catf:\n if not line.startswith(\"V pkg\") and \\\n not line.startswith(\"C pkg\"):\n continue\n f = cls.__parse_entry(line, pub)\n ServerCatalog.cache_fmri(cat, f, pub)\n\n catf.close()",
"def getCatalog(self):\n return \"\".join([self.catalog[n].fd.getInfo()\n for n in sorted(self.catalog)])",
"def catalog(self):\n tpl = self.data.get('catalog', None)\n if tpl != None:\n return tpl[2]\n return None",
"def get_catalog():\n return jsonify(getCatalog())",
"def testCosmologyCatalog(self):\n dbObj = myTestGals(database=self.dbName)\n cat = cosmologicalGalaxyCatalog(dbObj)\n cat.write_catalog(self.catName)",
"def _get_catalog_object(self):\n return self.cluster.catalogd.service.read_debug_webpage(\n \"catalog_object?object_type=TABLE&object_name=functional.alltypes\")",
"def load_catalog(self):\n self.catalog = pd.read_csv(self.catalog_path, \n index_col=0, parse_dates=True)\n self.unique_years = self.catalog.index.year.unique()\n return",
"def test_api_ucs_get_catalog(self):\n api_data = request(\"get\", \"/sys\")\n self.assertEqual(api_data['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))\n total_elements = 0\n for elementTypes in api_data[\"json\"]:\n for element in api_data[\"json\"][str(elementTypes)]:\n api_data_c = request(\"get\", \"/catalog\",\n query={\"identifier\": element[\"relative_path\"].strip(\"/\")})\n self.assertEqual(api_data_c['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' +\n str(api_data_c['status']))\n total_elements += 1\n self.assertGreater(total_elements, 0, \"Zero catalog elements found\")\n # TO DO: deeper check on the catalog data",
"def readDataFromCosmosDB(self):\n self.cosmosdb.updateCollectionThroughput(\n self.config.get_database_name(), self.config.get_hash_table(), self.config.get_scaleup_cosmos(),\n self.config.get_key(),\n self.config.get_cosmos_account())\n\n # read all the data from cosmos DB with encrypted fields and store in a data frame\n df = spark.read.format(\"com.microsoft.azure.cosmosdb.spark\").options(\n **self.config.get_hash_readconfig()).load()\n\n # iterate over the dataframe and decrypt and replace all fields except the cosmos db system fields strating\n # with \"_\" and the key --> id field since its hashed not encrypted and also not the partition field\n df = df.repartition(160).cache()\n dec_udf = udf(decrypt)\n\n for columns in df.columns:\n if columns.startswith('_') or columns.startswith('id') or columns.startswith('partition'):\n print('not to be encrypted field: ' + columns)\n else:\n print('to be encrypted field: ' + columns)\n df = df.withColumn(columns, dec_udf(df[columns]))\n print(\"succesfully decrypted the fields in spark df data frame\")\n\n # Register the DataFrame as a SQL temporary view\n df = df.repartition(1).cache()\n # df.persist(StorageLevel.DISK_ONLY_2)\n df.createOrReplaceTempView(\"customer\")\n spark.sql(\"CACHE TABLE customer\").collect()\n\n print(\"succesfully read \" + str(df.count()) +\n \" records from CosmosDB and saved in spark df data frame\")\n self.cosmosdb.updateCollectionThroughput(\n self.config.get_database_name(), self.config.get_hash_table(), self.config.get_scaledown_cosmos(),\n self.config.get_key(),\n self.config.get_cosmos_account())\n\n return df",
"def load_catalog(path=path_to_cat):\n catalog = pd.read_csv(path,delim_whitespace=True,usecols=(3,6), names = (\"qso_name\",\"redshift\"))\n return catalog[\"qso_name\"],catalog[\"redshift\"]",
"def readCatalogue(self,catalogue):\n fname = str(catalogue)\n fpath = \"\"\n f = open(fpath+fname,\"r\")\n print(\"Using catalogue file: %s\" % f.name)\n print(\"Loading source information.\")\n\n for line in f:\n name = line.rstrip()\n if name.lower() == \"sun\":\n src = RadioSource(\"Sun\")\n src.sun()\n self.radioSources.append(src)\n elif name.lower() == \"moon\":\n src = RadioSource(\"Moon\")\n src.moon()\n self.radioSources.append(src)\n else:\n src = RadioSource(name)\n chk = src.lookupAstropy()\n if chk == True:\n # found the source online\n if src.getExists() == True:\n self.radioSources.append(src)\n else:\n pass\n else:\n # can't find source online - maybe internet is down - use radec coords if supplied\n lineList = line.rstrip().split()\n if len(lineList) > 1:\n name = lineList[0]\n ra = lineList[1]\n dec = lineList[2]\n # what if the source name ias a space e.g. cass A\n else:\n pass\n f.close()",
"def read_catalogs(config, key=None, list_key=None, num=0, logger=None, is_rand=None):\n if logger is None:\n logger = treecorr.config.setup_logger(\n treecorr.config.get(config,'verbose',int,0), config.get('log_file',None))\n\n if key is None and list_key is None:\n raise AttributeError(\"Must provide either key or list_key\")\n if key is not None and key in config:\n if list_key in config:\n raise AttributeError(\"Cannot provide both key and list_key\")\n file_names = config[key]\n elif list_key is not None and list_key in config:\n list_file = config[list_key]\n with open(list_file,'r') as fin:\n file_names = [ f.strip() for f in fin ]\n else:\n # If this key was required (i.e. file_name) then let the caller check this.\n return []\n if is_rand is None:\n if key is not None:\n is_rand = 'rand' in key\n else:\n is_rand = 'rand' in list_key\n if not isinstance(file_names,list):\n file_names = file_names.split()\n return [ Catalog(file_name, config, num, logger, is_rand) for file_name in file_names ]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
add fields from the cat; some will not be in the odata but some will. When copy is True we will copy over the ones that are in both, in some cases | def _add_cat_fields(self, odata, copy=True):
    # these are required fields from get_meds_output_dtype
    # that we have put into the input catalog
    always_copy=[
        'id',
        'ra',
        'dec',
    ]

    cat = self.cat_orig

    add_dt = []
    for d in cat.dtype.descr:
        n = d[0]
        if n not in odata.dtype.names:
            add_dt.append(d)

    obj_data = eu.numpy_util.add_fields(
        odata,
        add_dt,
    )

    if copy:
        for n in always_copy:
            obj_data[n] = cat[n]

        for d in add_dt:
            n = d[0]
            if n in always_copy:
                continue

            # don't clobber things that should be left at
            # their default values
            if n not in odata.dtype.names:
                obj_data[n] = cat[n]

    return obj_data | [
"def copyObjAttr(obj1,obj2,filterAttr=[],debug=False):\n\t\n\t\n\t#Going through all attributes blank object\n\tfor item in vars(obj1):\n\t\tif item not in filterAttr:\n\t\t\tsetattr(obj2, str(item), vars(obj1)[str(item)])\n\t\t\n\treturn obj2",
"def copyAttributes(self, other, add_nxpars=False):\n import copy\n \n self.setTitle(other.getTitle())\n self.setDataSetType(other.getDataSetType())\n self.setAllAxisLabels(other.getAllAxisLabels())\n self.setAllAxisUnits(other.getAllAxisUnits())\n self.setYLabel(other.getYLabel())\n self.setYUnits(other.getYUnits())\n if len(self.attr_list.keys()) == 0:\n self.attr_list = copy.copy(other.attr_list)\n else:\n self.attr_list.instrument = copy.copy(other.attr_list.instrument)\n self.attr_list.sample = copy.copy(other.attr_list.sample)\n\n if add_nxpars:\n nxpar_keys = [item[0] for item in self.attr_list.iteritems() \\\n if isinstance(item[1], NxParameter)]\n\n for nxpar_key in nxpar_keys:\n self.attr_list[nxpar_key] += other.attr_list[nxpar_key]\n else:\n # Do nothing\n pass\n \n keys_to_get = [other_key for other_key in other.attr_list \\\n if other_key not in self.attr_list]\n \n for key_to_get in keys_to_get:\n self.attr_list[key_to_get] = \\\n copy.copy(other.attr_list[key_to_get])",
"def test__ComponentMetadataRow__copy_with__1():\n old_components = [Component(ComponentType.button, label = 'chata')]\n new_components = [Component(ComponentType.button, label = 'yuina')]\n \n component_metadata = ComponentMetadataRow(\n components = old_components,\n )\n copy = component_metadata.copy_with(\n components = new_components,\n )\n \n _assert_fields_set(copy)\n vampytest.assert_is_not(component_metadata, copy)\n vampytest.assert_eq(copy.components, tuple(new_components))",
"def _copy_only_fields(self, doc, fields, container):\r\n\r\n if fields is None:\r\n return self._copy_field(doc, container)\r\n else:\r\n if not fields:\r\n fields = {\"_id\": 1}\r\n if not isinstance(fields, dict):\r\n fields = helpers._fields_list_to_dict(fields)\r\n\r\n #we can pass in something like {\"_id\":0, \"field\":1}, so pull the id value out and hang on to it until later\r\n id_value = fields.pop('_id', 1)\r\n\r\n #other than the _id field, all fields must be either includes or excludes, this can evaluate to 0\r\n if len(set(list(fields.values()))) > 1:\r\n raise ValueError('You cannot currently mix including and excluding fields.')\r\n\r\n #if we have novalues passed in, make a doc_copy based on the id_value\r\n if len(list(fields.values())) == 0:\r\n if id_value == 1:\r\n doc_copy = container()\r\n else:\r\n doc_copy = self._copy_field(doc, container)\r\n #if 1 was passed in as the field values, include those fields\r\n elif list(fields.values())[0] == 1:\r\n doc_copy = container()\r\n for key in fields:\r\n if key in doc:\r\n doc_copy[key] = doc[key]\r\n #otherwise, exclude the fields passed in\r\n else:\r\n doc_copy = self._copy_field(doc, container)\r\n for key in fields:\r\n if key in doc_copy:\r\n del doc_copy[key]\r\n\r\n #set the _id value if we requested it, otherwise remove it\r\n if id_value == 0:\r\n if '_id' in doc_copy:\r\n del doc_copy['_id']\r\n else:\r\n if '_id' in doc:\r\n doc_copy['_id'] = doc['_id']\r\n\r\n fields['_id'] = id_value #put _id back in fields\r\n return doc_copy",
"def union(self, other: Catalog) -> Catalog:\n cat = self.copy()\n oth_cp = other.copy()\n\n for k in oth_cp.keys():\n for ver_id, version in oth_cp[k].versions.items():\n cat[k][ver_id] = version\n return cat",
"def _copyFields(self, dest, src, etreeModel=None, withReadOnly=False):\n for field in dest._meta.fields:\n # Ignore pk fields\n if field.primary_key:\n continue\n if not withReadOnly and getattr(field, 'APIReadOnly', None):\n # Ignore APIReadOnly fields\n continue\n\n # django throws ObjectDoesNotExist if you try to access a\n # related model that doesn't exist, so just swallow it.\n try:\n newFieldVal = getattr(src, field.name, None)\n except exceptions.ObjectDoesNotExist:\n newFieldVal = None\n\n # Make sure we don't overwrite existing values on dest with fields\n # that default to None by checking that the field was actually\n # provided on etreeModel.\n if newFieldVal is None and (etreeModel is None or etreeModel.find(field.name) is None):\n continue\n\n # Set the new value on dest if it differs from the old value.\n oldFieldVal = getattr(dest, field.name)\n if newFieldVal != oldFieldVal:\n setattr(dest, field.name, newFieldVal)",
"def merge_fields(self, request, obj, old_self):\n return self",
"def merge(self, new_store):\n if new_store.name and len(new_store.name) > 0:\n self.name = new_store.name\n if new_store.address and len(new_store.address) > 0:\n self.address = new_store.address\n if new_store.city and len(new_store.city) > 0:\n self.city = new_store.city\n if new_store.state and len(new_store.state) > 0:\n self.state = new_store.state\n if new_store.zip and new_store.zip > 0:\n self.zipcode = new_store.zip\n if new_store.phone and new_store.phone > 0:\n self.phone = new_store.phone",
"def field_copy(self, rec, *keys):\n retval = dbdict()\n for key in keys:\n if key in rec:\n retval[key] = rec[key]\n return retval",
"def copy_attrs(data_orig, data_new):\n\n if isinstance(data_orig, Dataset):\n\n # Variables\n for v in data_orig.data_vars:\n field = data_orig[v]\n for attr, val in field.attrs.items():\n data_new[v].attrs[attr] = val\n\n # Coordinates\n for c in data_orig.coords:\n coord = data_orig.coords[c]\n for attr, val in coord.attrs.items():\n if c in data_new.coords:\n data_new.coords[c].attrs[attr] = val\n\n # Metadata\n for attr, val in data_orig.attrs.items():\n data_new.attrs[attr] = val\n\n elif isinstance(data_orig, DataArray):\n\n # Variable Metadata\n for att, val in data_orig.attrs.items():\n data_new.attrs[att] = val\n\n # Coordinates\n for c in data_orig.coords:\n coord = data_orig.coords[c]\n for attr, val in coord.attrs.items():\n if c in data_new.coords:\n data_new.coords[c].attrs[attr] = val\n\n else:\n raise ValueError(\"Couldn't handle type %r\" % type(data_orig))\n\n return data_new",
"def test_copy_features(self):\n fc = self.read_feature()\n other = FeatureCollection(features=fc.features,\n otherProperties=fc.otherProperties)\n assert len(other.features) == 1\n feature = other.features[0]\n\n self.check_feature(feature)",
"def _merge_attributes(self, workout):\n keys = self.__table__.columns.keys()\n for key in keys:\n if key in [\"id\",\n \"external_id\",\n \"is_duplicate_with\",\n \"manual_check_required_with\",\n ]:\n continue\n elif getattr(self, key) == None:\n # copy attribute if empty; else keep existing \n setattr(self, key, getattr(workout, key))",
"def copy_attrs_from(self, ods):\n for item in omas_ods_attrs:\n if item not in ['_parent', '_dynamic']:\n setattr(self, item, getattr(ods, item, None))\n return self",
"def _merge_two(self, obj1, obj2):\r\n for uniq_ident in obj2.keys():\r\n if (uniq_ident not in obj1) \\\r\n or (obj1[uniq_ident]['modified'] \\\r\n < obj2[uniq_ident]['modified']):\r\n obj1[uniq_ident] = obj2[uniq_ident]\r\n\r\n return obj1 # self._dict_to_list(obj1)\r",
"def copyBooks(self):\n skipMods = set(('Morrowind.esm',self.fileInfo.name))\n for id,(record,modName) in (self.srcBooks.items() + self.altBooks.items()):\n if modName not in skipMods:\n self.setRecord(copy.copy(record))",
"def test__ComponentMetadataRow__copy_with_keyword_parameters__1():\n old_components = [Component(ComponentType.button, label = 'chata')]\n new_components = [Component(ComponentType.button, label = 'yuina')]\n \n component_metadata = ComponentMetadataRow(\n components = old_components,\n )\n copy = component_metadata.copy_with_keyword_parameters({\n 'components': new_components,\n })\n \n _assert_fields_set(copy)\n vampytest.assert_is_not(component_metadata, copy)\n vampytest.assert_eq(copy.components, tuple(new_components))",
"def add(self, category, key, obj, sec_obj=None):\n if category == 'merge-candidate':\n self.data_mergecandidate[self.key2data[key]][obj.handle] = \\\n self._extract_mergeinfo(key, obj, sec_obj)\n elif category == 'new-object':\n self.data_newobject[self.key2data[key]] += 1\n elif category == 'unknown-object':\n self.data_unknownobject[self.key2data[key]] += 1\n elif category == 'relative-path':\n self.data_relpath = True",
"def _set_additional_fields(self, po):\n pass",
"def copy_fields(self, model):\n fields = super(HistoricalRecords, self).copy_fields(model)\n for name, field in self.additional_fields.items():\n assert name not in fields\n assert hasattr(self, 'get_%s_value' % name)\n fields[name] = field\n return fields"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
make a new struct with ncutout-sized arrays based on the actual maximum ncutout | def _make_resized_data(self, odata):
    nmax = odata['file_id'].shape[1]
    new_nmax = odata['ncutout'].max()
    if new_nmax < 2:
        new_nmax = 2

    temp_obj_data = odata

    nobj = temp_obj_data.size

    new_data = meds.util.get_meds_output_struct(
        nobj,
        new_nmax,
        extra_fields=self._get_fields(new_nmax),
    )
    new_data = self._add_cat_fields(new_data, copy=False)

    for name in new_data.dtype.names:
        if name in temp_obj_data.dtype.names:
            shape = new_data[name].shape
            lshape = len(shape)
            if lshape > 1 and shape[1] == new_nmax:
                new_data[name][:,:] = temp_obj_data[name][:,0:new_nmax]
            else:
                new_data[name][:] = temp_obj_data[name][:]

    del temp_obj_data

    return new_data | [
"def maxout_var(self, rv):\r\n #self.cpt += 0.00002\r\n exp_len = int(len(self.cpt)/self.card[rv])\r\n new_cpt = np.zeros(exp_len)\r\n\r\n rv_card = self.card[rv]\r\n rv_stride = self.stride[rv]\r\n\r\n k=0\r\n p = np.prod([self.card[r] for r in self.scope if self.stride[r] < self.stride[rv]])\r\n for i in range(exp_len):\r\n max_val = 0\r\n for c in range(rv_card):\r\n if self.cpt[k + (rv_stride*c)] > max_val:\r\n max_val = self.cpt[k + (rv_stride*c)]\r\n new_cpt[i] = max_val\r\n k+=1\r\n if (k % p == 0):\r\n k += (p* (rv_card - 1))\r\n self.cpt=new_cpt\r\n\r\n del self.card[rv]\r\n self.stride.update((k,v/rv_card) for k,v in self.stride.items() if v > rv_stride)\r\n del self.stride[rv]\r\n self.scope.remove(rv)\r\n\r\n #if rv == self.var:\r\n #self.var = [k for k,v in self.stride.items() if v==1][0]\r\n\r\n #if len(self.scope) > 0:\r\n #self.normalize()\r",
"def macs_maxpool(module: _MaxPoolNd, inp: Tensor, out: Tensor) -> int:\n\n k_size = reduce(mul, module.kernel_size) if isinstance(module.kernel_size, tuple) else module.kernel_size\n\n # for each spatial output element, check max element in kernel scope\n return out.numel() * (k_size - 1)",
"def __trim_stack(\n cqt_resp: List[np.ndarray], n_bins: int, dtype: DTypeLike\n) -> np.ndarray:\n max_col = min(c_i.shape[-1] for c_i in cqt_resp)\n # Grab any leading dimensions\n shape = list(cqt_resp[0].shape)\n shape[-2] = n_bins\n shape[-1] = max_col\n cqt_out = np.empty(shape, dtype=dtype, order=\"F\")\n\n # Copy per-octave data into output array\n end = n_bins\n for c_i in cqt_resp:\n # By default, take the whole octave\n n_oct = c_i.shape[-2]\n # If the whole octave is more than we can fit,\n # take the highest bins from c_i\n if end < n_oct:\n cqt_out[..., :end, :] = c_i[..., -end:, :max_col]\n else:\n cqt_out[..., end - n_oct : end, :] = c_i[..., :max_col]\n\n end -= n_oct\n\n return cqt_out",
"def expanding_max_nb(a, minp=1):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = expanding_max_1d_nb(a[:, col], minp=minp)\n return out",
"def max_noutput_items(self) -> \"int\":\n return _beamforming_swig.phasedarray_sptr_max_noutput_items(self)",
"def get_bins(size, n, max_value):\n bin_lims = get_bin_lims(n, max_value)\n return sort_by_rows(np.array(list(itertools.product(bin_lims, repeat=size))))",
"def N50_np(len_np, cut=50):\n cutoff=sum(len_np)*cut/100.0\n len_np.sort()\n \n count=0\n for i in len_np:\n count+=i\n if count>=cutoff:\n break\n \n print ( \"N%d is %d bp.\" % (cut, i))\n \n return i",
"def arrayManipulation_shortpeak(n, queries):\n a_s = []\n b_s = []\n k_s = []\n\n for i, row in enumerate(queries):\n a_s.append(row[0])\n b_s.append(row[1])\n k_s.append(row[2])\n\n # breakpoint()\n x = a_s + b_s\n all_indices = list(set(x))\n all_indices.sort()\n short_arr = [0] * len(all_indices)\n\n # mapping index of n-long array to index of shorter array\n index_lookup = {}\n for j, el in enumerate(all_indices):\n index_lookup[el] = j\n\n # breakpoint()\n for m in range(len(a_s)):\n short_arr[index_lookup[a_s[m]]] += k_s[m]\n short_arr[index_lookup[b_s[m]]] -= k_s[m]\n\n maxval = 0\n cumsum = 0\n for i, el in enumerate(short_arr):\n cumsum += el\n maxval = max(maxval, cumsum)\n\n print(f'{maxval: <15,d}: Max value')\n arr_size = short_arr.__sizeof__() / 1000000\n total = ((a_s.__sizeof__() / 1000000)\n + b_s.__sizeof__() / 1000000\n + k_s.__sizeof__() / 1000000\n + queries.__sizeof__() / 1000000\n + index_lookup.__sizeof__() / 1000000\n + short_arr.__sizeof__() / 1000000)\n print(f'{total: <15.2f}: All objects size(MB)')\n print(f'{arr_size: <15.2f}: Array size(MB)')\n return maxval, arr_size",
"def get_bucket_boundaries(bucket_size: int, max_size: int) -> np.ndarray:\n return np.arange(bucket_size, max_size + bucket_size, bucket_size) + 1",
"def get_cutting_sizes(self, N_L):\n\n self.X0 = 0.\n self.Y0 = 0.\n\n # self.cutting_size_xs = np.array([2 ** (i + 1) for i\n # in range(int(np.log2(self.L)))])\n # self.cutting_size_xs = np.arange(1, self.L / 2)\n self.cutting_size_xs = np.array(\n sorted(set(map(\n int,\n np.logspace(0, np.log(self.L) / np.log(1.5), base=1.5, num=N_L)\n ))))\n # self.cutting_size_ys = self.cutting_size_xs * (np.sqrt(3) / 2)\n self.cutting_size_ys = self.cutting_size_xs\n self.cutting_size_max_width = self.cutting_size_xs[-1]\n self.cutting_size_max_height = self.cutting_size_ys[-1]\n\n self.cutting_sizes = np.array([self.cutting_size_xs,\n self.cutting_size_ys]).T\n return",
"def set_max_noutput_items(self, *args, **kwargs):\n return _ncofdm_swig.ShortPNdetector_sptr_set_max_noutput_items(self, *args, **kwargs)",
"def process_lim(pool_lim, area):\n\n pool_nolim = [] # No limitation\n pool_lim_n = [] # N limitation\n pool_lim_p = [] # P limitation\n # Colimitation driven by N (When the realized NPP allocation is smaller\n # thant the potential due to N but the other element is also limitant)\n pool_colim_n = []\n # Colimitation driven by P (When the realized NPP allocation is smaller\n # than the potential due to P but the other element is also limitant\n pool_colim_p = []\n # Real Colimitation = K <= 1D-6 (K is difference between P and N realized NPP allocation)\n pool_colim_np = []\n\n ndays = pool_lim.shape[1]\n npls = pool_lim.shape[0]\n\n for pls in range(npls):\n if area[pls]:\n no_lim = (pool_lim[pls, :] == 0).sum() / ndays * area[pls]\n lim_n = (np.count_nonzero(\n pool_lim[pls, :] == 1) / ndays) * area[pls]\n lim_p = (np.count_nonzero(\n pool_lim[pls, :] == 2) / ndays) * area[pls]\n colim_n = (np.count_nonzero(\n pool_lim[pls, :] == 4) / ndays) * area[pls]\n colim_p = (np.count_nonzero(\n pool_lim[pls, :] == 5) / ndays) * area[pls]\n colim_np = (np.count_nonzero(\n pool_lim[pls, :] == 6) / ndays) * area[pls]\n\n pool_nolim.append(no_lim)\n pool_lim_n.append(lim_n)\n pool_lim_p.append(lim_p)\n pool_colim_n.append(colim_n)\n pool_colim_p.append(colim_p)\n pool_colim_np.append(colim_np)\n\n return (np.sum(pool_nolim),\n np.sum(pool_lim_n),\n np.sum(pool_lim_p),\n np.sum(pool_colim_n),\n np.sum(pool_colim_p),\n np.sum(pool_colim_np))",
"def max_pooling(img):\n result_img = img.copy()\n heignt, width, _ = result_img.shape\n for h in range(0, heignt, 8):\n for w in range(0, width, 8):\n result_img[h:h+8, w:w+8, 0] = np.max(result_img[h:h+8, w:w+8, 0])\n result_img[h:h+8, w:w+8, 1] = np.max(result_img[h:h+8, w:w+8, 1])\n result_img[h:h+8, w:w+8, 2] = np.max(result_img[h:h+8, w:w+8, 2])\n result_img[(heignt//8)*8:heignt, :, :] = 0\n result_img[:, (width//8)*8:width, :] = 0\n return result_img",
"def create_array( n ):",
"def max_noutput_items(self):\n return _inatel5g_swig.mqam_map_bc_sptr_max_noutput_items(self)",
"def max_noutput_items(self):\n return _spacegrant_swig.invert_bit_sptr_max_noutput_items(self)",
"def max_noutput_items(self):\n return _ncofdm_swig.ShortPNdetector_sptr_max_noutput_items(self)",
"def expanding_max_1d_nb(a, minp=1):\n out = np.empty_like(a, dtype=np.float_)\n maxv = a[0]\n cnt = 0\n for i in range(a.shape[0]):\n if np.isnan(maxv) or a[i] > maxv:\n maxv = a[i]\n if ~np.isnan(a[i]):\n cnt += 1\n if cnt < minp:\n out[i] = np.nan\n else:\n out[i] = maxv\n return out",
"def cut_histogram(net,typec):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get box sizes that are either 2**N or 3*2**N, within the limits set by the user | def _get_box_sizes(self, image_info, cat):
    file_id=0
    impath=image_info['image_path'][file_id].strip()
    ext=image_info['image_ext'][file_id]
    wcs_data = fitsio.read_header(impath, ext=ext)
    wcs = eu.wcsutil.WCS(wcs_data)

    jacob = wcs.get_jacobian(100,100)
    dudcol, dudrow, dvdcol, dvdrow = jacob

    det = dvdrow*dudcol - dvdcol*dudrow
    pixel_scale = np.sqrt(abs(det))
    print('found pixel scale:',pixel_scale)
    box_size = cat['box_size_arcsec']/pixel_scale

    # clip to range
    box_size.clip(
        min=self['min_box_size'],
        max=self['max_box_size'],
        out=box_size,
    )
    box_size = box_size.astype('i4')

    w,=np.where( ( (box_size % 2) != 0 ) )
    if w.size > 0:
        box_size[w] += 1

    return box_size | [
"def create_compute_box_size(self):\n def compute_best_size_for(dim):\n size = ((self.element_space[dim]-1)//self.box_space[dim]) + 1\n size += 2 * self.ghost_space[dim]\n while size % Level.BOX_ALIGNMENTS[dim]:\n size += 1\n return size\n\n return Space([compute_best_size_for(dim) for dim in range(self.dimensions)])",
"def _get_block_sizes(resnet_size):\n choices = {\n 18: [2, 2, 2, 2],\n 34: [3, 4, 6, 3],\n 50: [3, 4, 6, 3],\n 101: [3, 4, 23, 3],\n 152: [3, 8, 36, 3],\n 200: [3, 24, 36, 3]\n }\n\n try:\n return choices[resnet_size]\n except KeyError:\n err = ('Could not find layers for selected Resnet size.\\n'\n 'Size received: {}; sizes allowed: {}.'.format(\n resnet_size, list(choices.keys())))\n raise ValueError(err)",
"def _get_block_sizes(resnet_size):\n choices = {\n 18: [2, 2, 2, 2],\n 34: [3, 4, 6, 3],\n 50: [3, 4, 6, 3],\n 101: [3, 4, 23, 3],\n 152: [3, 8, 36, 3],\n 200: [3, 24, 36, 3]\n }\n\n try:\n return choices[resnet_size]\n except KeyError:\n err = (\n 'Could not find layers for selected Resnet size.\\n'\n 'Size received: {}; sizes allowed: {}.'.format(\n resnet_size, choices.keys()))\n raise ValueError(err)",
"def guess_box_size(xyz):\n return np.round(np.max(xyz[:, 1] - np.min(xyz[:, 1]), 0))",
"def _acceptable_dimensions(self, box):\n return self._min_width < box.x1-box.x0 < self._max_width and\\\n self._min_height < box.y1-box.y0 < self._max_height",
"def get_box_size(self):\n return self.__box_size",
"def get_block_sizes(resnet_size):\n choices = {\n 18: [2, 2, 2, 2],\n 34: [3, 4, 6, 3],\n 50: [3, 4, 6, 3],\n 101: [3, 4, 23, 3],\n 152: [3, 8, 36, 3],\n 200: [3, 24, 36, 3]\n }\n\n try:\n return choices[resnet_size]\n except KeyError:\n err = ('Could not find layers for selected Resnet size.\\n'\n 'Size received: {}; sizes allowed: {}.'.format(\n resnet_size, choices.keys()))\n raise ValueError(err)",
"def _scale_dimensions(bound_box, factor):\n if (factor <= 0):\n raise Exception(\"scaling factor must be positive\")\n exit(1)\n [[x_min, y_min], [x_max, y_max]] = bound_box\n x_avg_diff = (x_max-x_min) / 2.0\n y_avg_diff = (y_max-y_min) / 2.0\n x_scale = (factor - 1) * x_avg_diff\n y_scale = (factor - 1) * y_avg_diff\n return [[(x_min - x_scale), (y_min - y_scale)], [(x_max + x_scale), (y_max + y_scale)]]",
"def GetNiceExtentsBySpacing(minval,maxval,spacing,tolerance):\n pass",
"def get_grid_sizes(self) ->torch.LongTensor:\n return self.locator.get_grid_sizes()",
"def setBoxsize(length,width,height):\n return length,width,height",
"def get_box_filter_size():\n try:\n box_filter_size = float(input(\"Set size of box filter (default: 3): \"))\n box_filter_size = next_odd_integer(box_filter_size)\n except ValueError:\n box_filter_size = 3\n print(\"Box filter size set to \" + str(box_filter_size))\n return int(box_filter_size)",
"def getLayoutDimensions(n, pref=\"height\"):\n nopt = np.sqrt(n)\n inoptw = int(nopt)\n inopth = int(nopt)\n while inoptw * inopth < n:\n if pref == \"width\":\n inoptw += 1\n if inoptw * inopth > (n - inopth):\n inoptw -= 1\n inopth += 1\n else:\n inopth += 1\n if inoptw * inopth > (n - inoptw):\n inopth -= 1\n inoptw += 1\n\n return (inopth, inoptw)",
"def boxSize(self):\n try:\n return [float(size) for size in self.comment.split(\",\")]\n except:\n raise(InputError(self.comment, \"Cannot read size of box from comment.\"))",
"def get_bounding_box_size(images):\n height = max(image.shape[0] for image in images)\n width = max(image.shape[1] for image in images)\n return height, width",
"def dimensions():",
"def axes_limits_in_pixels(size):\n w, h = parse_size(size)\n l = -w / 2.\n r = w / 2.\n t = -h / 2.\n b = h / 2.\n return(l, r, t, b)",
"def get_term_dimensions():\n height, width = subprocess.check_output(SIZE).split()\n return int(width), int(height)",
"def maxSize():\n rect = pf.app.desktop().availableGeometry()\n maxh,maxw = rect.width(),rect.height()\n return maxh,maxw"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get the image info structure. Set default scale to 1.0. The other fields are 0 for numbers, or blank for strings | def get_image_info_struct(nimage, path_len,
                          image_id_len=None,
                          wcs_len=None,
                          ext_len=None,
                          extra_dtype=None):
    dt = get_image_info_dtype(
        path_len,
        image_id_len=image_id_len,
        wcs_len=wcs_len,
        ext_len=ext_len,
        extra_dtype=extra_dtype,
    )

    data = np.zeros(nimage, dtype=dt)

    data['scale'] = 1.0

    return data | [
"def _get_meta_dict(self, img) -> dict:\n return {\"format\": img.format, \"mode\": img.mode, \"width\": img.width, \"height\": img.height}",
"def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)",
"def image_data_info(page):\n xObject = page['/Resources']['/XObject'].getObject()\n\n for obj_key in xObject:\n obj = xObject[obj_key]\n if obj['/Subtype'] == '/Image':\n width, height = (obj['/Width'], obj['/Height'])\n num_bytes = len(obj._data)\n density = num_bytes * 1.0 / (width * height)\n return {'width': width, 'height': height, 'size': num_bytes, 'density': density}\n\n return None",
"def get_img_properties(img):\n return (img.format, img.size, img.mode)",
"def image_info(self):\n\n if not self._image_info:\n path_image_info = os.path.join(\n self._path, f\"ImageSet_{self._image['ImageSetID']}.ImageInfo\"\n )\n\n # Make sure the ImageInfo file really exists\n if not os.path.exists(path_image_info):\n self.logger.warning(\"ImageInfo path doesn't exist: %s\", path_image_info)\n return None\n\n self.logger.debug(\"Reading image data from: %s\", path_image_info)\n self._image_info = pinn_to_dict(path_image_info)\n\n return self._image_info",
"def __bobo_traverse__(self, REQUEST, name):\n if name.startswith('image'):\n field = self.getField('image')\n image = None\n if name == 'image':\n image = field.getScale(self)\n else:\n scalename = name[len('image_'):]\n if scalename in field.getAvailableSizes(self):\n image = field.getScale(self, scale=scalename)\n if image is not None and not isinstance(image, basestring):\n # image might be None or '' for empty images\n return image\n\n return super(Promotion, self).__bobo_traverse__(REQUEST, name)",
"def get_image_format_for_scale(scale=1.0):\n img = read_single(1, -70, 0, scale)\n return img.shape",
"def scalarInfo(img, cnt):\n\tm = cntInfo(img, cnt)\n\td = {\"perimeter\":m[\"perimeter\"], \"oreientation\":m[\"orientation\"], \"solidity\":m[\"solidity\"],\"height\":m[\"height\"], \"extent\":m[\"extent\"], \"aspect ratio\":m[\"aspect ratio\"], \"area\":m[\"area\"], \"sum intensity\":m[\"sum intensity\"], \"width\":m[\"width\"], \"equivalent diameter\": m[\"equivalent diameter\"], \"mean intensity\": m[\"mean intensity\"]}\n\treturn d",
"def get_image_size(self):",
"def test_get_media_info_image(self):\n img = image()\n info_img, info_format = _get_media_info(img.pk, 'image')\n eq_(img.pk, info_img.pk)\n eq_('jpeg', info_format)",
"def __init__(self, data, pixscale = 7.77/43):\n self.data = data\n self.pixscale = pixscale",
"def get_image_properties(file: str) -> Dict:\n return {\n 'width': 1000,\n 'height': 1000,\n }",
"def get_image_scale(scale):\n if not isinstance(scale, str):\n return scale\n if IMAGE_ENUMS:\n return Resampling[scale]\n return getattr(Image, scale)",
"def change_info(self) -> None:\r\n info: Dict[str, str] = image_info(self.sw.current_image_id())\r\n self.change_image_count()\r\n if len(info) > 0:\r\n self.image_res.setText(info['resolution'])\r\n else:\r\n self.image_res.setText('Image info not found')",
"def get_image_meta(image_file):\n image = Image.open(image_file)\n\n result = {}\n\n result['pixel-width'], result['pixel-height'] = image.size\n\n # this code is for retina images on non-retina displays\n image_dpi = image.info.get('dpi')\n if image_dpi and len(image_dpi) == 2:\n result['dpi-width'], result['dpi-height'] = image_dpi\n else:\n result['dpi-width'], result['dpi-height'] = (72, 72)\n\n result['width'] = result['pixel-width'] * 72 / result['dpi-width']\n result['height'] = result['pixel-height'] * 72 / result['dpi-height']\n\n return result",
"def GetScale(self):\n ...",
"def __attrs_post_init__(self):\n self.key = uuid.uuid4().hex\n if self.properties is None:\n self.properties = {}\n if self.is_image:\n try:\n img_size = Image.open(self.open()).size\n self.properties.update(width=img_size[0], height=img_size[1])\n except IOError:\n self.content_type = 'application/octet-stream'",
"def test_get_scale_description(self):\n image = self._callFUT(self.data)\n self.assertIsNone(image.get('description'))",
"def set_attributes(self):\n\n pil_image = PILImage.open(self.path)\n\n # Get the exif data\n # Thanks https://gist.github.com/erans/983821\n exif_data = {}\n info = pil_image._getexif()\n if info:\n for tag, value in info.items():\n decoded = PILExifTags.TAGS.get(tag, tag)\n if decoded == \"GPSInfo\":\n gps_data = {}\n for t in value:\n sub_decoded = PILExifTags.GPSTAGS.get(t, t)\n gps_data[sub_decoded] = value[t]\n\n exif_data[decoded] = gps_data\n else:\n exif_data[decoded] = value\n\n gps_latitude = exif_data.get(\"GPSInfo\",{}).get(\"GPSLatitude\")\n gps_latitude_ref = exif_data.get(\"GPSInfo\",{}).get('GPSLatitudeRef')\n gps_longitude = exif_data.get(\"GPSInfo\",{}).get('GPSLongitude')\n gps_longitude_ref = exif_data.get(\"GPSInfo\",{}).get('GPSLongitudeRef')\n gps_altitude = exif_data.get(\"GPSInfo\",{}).get('GPSAltitude')\n gps_altitude_ref = exif_data.get(\"GPSInfo\",{}).get('GPSAltitudeRef')\n gps_direction = exif_data.get(\"GPSInfo\",{}).get('GPSImgDirection')\n gps_direction_ref = exif_data.get(\"GPSInfo\",{}).get('GPSImgDirectionRef')\n\n if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:\n lat = gps_tag_to_decimal_degress(gps_latitude)\n if gps_latitude_ref != \"N\": \n lat = 0 - lat\n\n lon = gps_tag_to_decimal_degress(gps_longitude)\n if gps_longitude_ref != \"E\":\n lon = 0 - lon\n\n # image attributes\n self.width, self.height = pil_image.size\n # exif attributes\n self.lat, self.lon = lat, lon\n self.focal = float(exif_data[\"FocalLengthIn35mmFilm\"])\n self.timestamp = datetime.datetime.strptime(exif_data[\"DateTimeOriginal\"], \"%Y:%m:%d %H:%M:%S\").timestamp()\n self.altitude = gps_altitude[0] / gps_altitude[1]\n self.direction = float(gps_direction) if gps_direction is not None else None\n self.pixel_size = (self.altitude * 35.0 / self.focal) / float(self.width)\n # transform attributes\n self.point = self.drone_map.reproject(lon,lat)\n self.angle = float(gps_direction) if gps_direction is not None else 0\n self.scale = 1.0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get the image_info dtype for the specified path string length and wcs string length | def get_image_info_dtype(path_len,
                         image_id_len=None,
                         wcs_len=None,
                         ext_len=None,
                         extra_dtype=None):
    path_fmt = 'U%d' % path_len

    if image_id_len is None:
        image_id_descr = 'i8'
    else:
        image_id_descr = 'U%d' % image_id_len

    if ext_len is not None:
        ext_descr = 'U%d' % ext_len
    else:
        ext_descr = 'i2'

    dt=[]
    for ctype in IMAGE_INFO_TYPES:
        path_name = '%s_path' % ctype
        ext_name = '%s_ext' % ctype

        dt += [
            (path_name, path_fmt),
            (ext_name,ext_descr),
        ]

    dt += [
        ('image_id', image_id_descr),
        ('image_flags', 'i8'),
        ('magzp', 'f4'),
        ('scale', 'f4'),
        ('position_offset','f8'),
    ]

    if wcs_len is not None:
        wcs_fmt = 'U%d' % wcs_len
        dt += [
            ('wcs',wcs_fmt),
        ]

    if extra_dtype is not None:
        dt += extra_dtype

    return dt | [
"def get_image_info_struct(nimage, path_len,\n image_id_len=None,\n wcs_len=None,\n ext_len=None,\n extra_dtype=None):\n dt = get_image_info_dtype(\n path_len,\n image_id_len=image_id_len,\n wcs_len=wcs_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n\n data = np.zeros(nimage, dtype=dt)\n\n data['scale'] = 1.0\n\n return data",
"def find_img_type(strng):\n format=(strng.split(\",\",1)[0]).split(\"/\")[1][:-7]\n return format",
"def formatLookup(format_str):\n pat = '(\\d+)([A-Z])'\n match = re.search(pat, format_str)\n #print match.group()\n \n data_len = int(match.group(1))\n data_fmt = str(match.group(2))\n np_fmt = fitsFormatLookup(data_fmt)\n np_dtype = '%i%s'%(data_len, np_fmt)\n \n return np_dtype, data_len, np_fmt",
"def __open_image(image_location: str):\n try:\n with Image.open(image_location) as image:\n width = image.width\n image_format = image.format\n return width, image_format\n except OSError:\n return 0, ''",
"def get_image_size(path, width, type_name):\n fc = _os.path.getsize(path) / type_mapping[type_name].itemsize\n shape = [width, int(fc / width)]\n computed_size = shape[0] * shape[1] * type_mapping[type_name].itemsize\n measured_size = _os.path.getsize(path)\n return shape",
"def get_size_str(img):\n n_x, n_y = img.shape[:2]\n import numpy as np\n\n matrix_size = \"{0}x{1}\".format(num_to_str(n_x), num_to_str(n_y))\n\n voxel_dims = np.array(img.header.get_zooms()[:3])\n\n voxel_size = \"x\".join([num_to_str(s) for s in voxel_dims])\n\n\n fov = [n_x, n_y] * voxel_dims[:2]\n fov = \"x\".join([num_to_str(s) for s in fov])\n\n return voxel_size, matrix_size, fov",
"def get_wcs(img_file):\n\n if img_file.endswith('fz'):\n hdu = 1\n else:\n hdu = 0\n im = galsim.fits.read(img_file, hdu=hdu)\n return im.wcs",
"def image_size(cls,filepath):\n height = -1\n width = -1\n\n with open(str(filepath), 'rb') as fhandle:\n head = fhandle.read(24)\n size = len(head)\n\n # handle big endian TIFF\n if size >= 8 and head.startswith(b\"\\x4d\\x4d\\x00\\x2a\"):\n offset = struct.unpack('>L', head[4:8])[0]\n fhandle.seek(offset)\n ifdsize = struct.unpack(\">H\", fhandle.read(2))[0]\n for i in range(ifdsize):\n tag, datatype, count, data = struct.unpack(\">HHLL\", fhandle.read(12))\n if tag == 256:\n if datatype == 3:\n width = int(data / 65536)\n elif datatype == 4:\n width = data\n else:\n raise ValueError(\"Invalid TIFF file: width column data type should be SHORT/LONG.\")\n elif tag == 257:\n if datatype == 3:\n height = int(data / 65536)\n elif datatype == 4:\n height = data\n else:\n raise ValueError(\"Invalid TIFF file: height column data type should be SHORT/LONG.\")\n if width != -1 and height != -1:\n break\n if width == -1 or height == -1:\n raise ValueError(\"Invalid TIFF file: width and/or height IDS entries are missing.\")\n # handle little endian Tiff\n elif size >= 8 and head.startswith(b\"\\x49\\x49\\x2a\\x00\"):\n offset = struct.unpack('<L', head[4:8])[0]\n fhandle.seek(offset)\n ifdsize = struct.unpack(\"<H\", fhandle.read(2))[0]\n for i in range(ifdsize):\n tag, datatype, count, data = struct.unpack(\"<HHLL\", fhandle.read(12))\n if tag == 256:\n width = data\n elif tag == 257:\n height = data\n if width != -1 and height != -1:\n break\n if width == -1 or height == -1:\n raise ValueError(\"Invalid TIFF file: width and/or height IDS entries are missing.\")\n # handle little endian BigTiff\n elif size >= 8 and head.startswith(b\"\\x49\\x49\\x2b\\x00\"):\n bytesize_offset = struct.unpack('<L', head[4:8])[0]\n if bytesize_offset != 8:\n raise ValueError('Invalid BigTIFF file: Expected offset to be 8, found {} instead.'.format(offset))\n offset = struct.unpack('<Q', head[8:16])[0]\n fhandle.seek(offset)\n ifdsize = struct.unpack(\"<Q\", fhandle.read(8))[0]\n for i in range(ifdsize):\n tag, datatype, count, data = struct.unpack(\"<HHQQ\", fhandle.read(20))\n if tag == 256:\n width = data\n elif tag == 257:\n height = data\n if width != -1 and height != -1:\n break\n if width == -1 or height == -1:\n raise ValueError(\"Invalid BigTIFF file: width and/or height IDS entries are missing.\")\n\n return width, height",
"def fl_get_string_dimension(style, size, txtstr, strlng):\n _fl_get_string_dimension = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_string_dimension\",\\\n None, [cty.c_int, cty.c_int, xfdata.STRING, cty.c_int,\n cty.POINTER(cty.c_int), cty.POINTER(cty.c_int)],\\\n \"\"\"void fl_get_string_dimension(int fntstyle, int fntsize,\n const char * s, int len, int * width, int * height)\"\"\")\n library.check_if_flinitialized()\n library.checkfatal_allowed_value_in_list(style, xfdata.TEXTSTYLE_list)\n i_style = library.convert_to_intc(style)\n i_size = library.convert_to_intc(size)\n s_txtstr = library.convert_to_bytestrc(txtstr)\n i_strlng = library.convert_to_intc(strlng)\n i_width, ptr_width = library.make_intc_and_pointer()\n i_height, ptr_height = library.make_intc_and_pointer()\n library.keep_elem_refs(style, i_style, size, i_size, txtstr, s_txtstr, \\\n strlng, i_strlng, i_width, i_height, ptr_width, ptr_height)\n _fl_get_string_dimension(i_style, i_size, s_txtstr, i_strlng, \\\n ptr_width, ptr_height)\n return i_width.value, i_height.value",
"def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)",
"def get_image_size(file_path): # type: (str) -> (int, int)\n size = path.getsize(file_path)\n\n with open(file_path) as f:\n data = f.read(25)\n\n if (size >= 10) and data[:6] in (\"GIF87a\", \"GIF89a\"):\n # GIFs\n w, h = struct.unpack(\"<HH\", data[6:10])\n width = int(w)\n height = int(h)\n elif (\n (size >= 24)\n and data.startswith(\"\\211PNG\\r\\n\\032\\n\")\n and (data[12:16] == \"IHDR\")\n ):\n # PNGs\n w, h = struct.unpack(\">LL\", data[16:24])\n width = int(w)\n height = int(h)\n elif (size >= 16) and data.startswith(\"\\211PNG\\r\\n\\032\\n\"):\n # older PNGs?\n w, h = struct.unpack(\">LL\", data[8:16])\n width = int(w)\n height = int(h)\n elif (size >= 2) and data.startswith(\"\\377\\330\"):\n # JPEG\n msg = \" raised while trying to decode as JPEG.\"\n f.seek(0)\n f.read(2)\n b = f.read(1)\n try:\n while b and ord(b) != 0xDA:\n while ord(b) != 0xFF:\n b = f.read(1)\n while ord(b) == 0xFF:\n b = f.read(1)\n if 0xC0 <= ord(b) <= 0xC3:\n f.read(3)\n h, w = struct.unpack(\">HH\", f.read(4))\n break\n else:\n f.read(int(struct.unpack(\">H\", f.read(2))[0]) - 2)\n b = f.read(1)\n width = int(w)\n height = int(h)\n except struct.error:\n raise UnknownImageFormat(\"StructError\" + msg)\n except ValueError:\n raise UnknownImageFormat(\"ValueError\" + msg)\n except Exception as e:\n raise UnknownImageFormat(e.__class__.__name__ + msg)\n else:\n raise UnknownImageFormat(\n \"Sorry, don't know how to get information from this file.\"\n )\n\n return width, height",
"def get_img_properties(img):\n return (img.format, img.size, img.mode)",
"def get_imagetype_data(filename):\n study = mudicom.load(filename)\n data = study.read()\n element_name_list = [x.name for x in data]\n element_value_list = [x.value for x in data]\n \n compression = element_value_list[element_name_list.index(\"Derivation Description\")]\n color_scheme = element_value_list[element_name_list.index(\"Photometric Interpretation\")]\n \n return compression, color_scheme",
"def dims(filespec, verbose=False):\n with open(filespec, \"rb\") as f:\n if f.read(4) == b\"\\x76\\x2f\\x31\\x01\": # EXR magic number\n version = np.frombuffer(f.read(4), dtype=\"<u4\")[0]\n max_strlen = 256 if (version & 0x400) else 32\n got_channels = False\n got_dims = False\n while not (got_channels and got_dims):\n attr_name = _read_string_nul(f, max_strlen)\n _ = _read_string_nul(f, max_strlen) # attr_type\n attr_size = np.frombuffer(f.read(4), dtype=\"<u4\")[0]\n if attr_name == \"channels\":\n nchan = 0\n isfloat = False\n bitdepth = 16\n while not got_channels:\n name = _read_string_nul(f, max_strlen)\n if len(name) >= 1:\n dtype = np.frombuffer(f.read(16), dtype=\"<u4\")[0]\n isfloat = isfloat or (dtype > 0)\n bitdepth = max(bitdepth, 16 if dtype == 1 else 32)\n nchan += 1\n else:\n got_channels = True\n elif attr_name == \"dataWindow\":\n box = np.frombuffer(f.read(16), dtype=\"<i4\")\n xmin, ymin, xmax, ymax = box\n width = xmax - xmin + 1\n height = ymax - ymin + 1\n got_dims = True\n else:\n _ = f.seek(attr_size, 1)\n if verbose:\n print(f\"Reading file {filespec} \", end='')\n print(f\"(w={width}, h={height}, c={nchan}, bitdepth={bitdepth})\")\n return width, height, nchan, isfloat, bitdepth\n raise RuntimeError(f\"File {filespec} is not a valid EXR file.\")",
"def extract_img_data(offset_dict, d_type, in_file):\n log.debug(\"Entering extract_img_data()\")\n\n data_types = {'height': ('height', np.uint32, '<I', 4),\n 'light': ('light', np.uint16, '<H', 2)}\n data = dict()\n data['name'] = d_type.capitalize()\n in_file.seek(offset_dict[data_types[d_type][0]])\n data['width'] = struct.unpack('<I', in_file.read(4))[0]\n data['height'] = struct.unpack('<I', in_file.read(4))[0]\n data['bit_depth'] = struct.unpack('<I', in_file.read(4))[0]\n data['compression'] = struct.unpack('<I', in_file.read(4))[0]\n data['data_byte_size'] = struct.unpack('<I', in_file.read(4))[0]\n data['palette_range_min'] = struct.unpack('<I', in_file.read(4))[0]\n data['palette_range_max'] = struct.unpack('<I', in_file.read(4))[0]\n # The palette section of the hexdump is 768 bytes long has 256 3-byte\n # repeats, for now I will store them as a 1d array of uint8 values\n palette = np.zeros(768, dtype=np.uint8)\n\n i = 0\n for val in range(768):\n palette[i] = ord(in_file.read(1))\n i = i+1\n data['palette'] = palette\n\n array = np.zeros((data['width']*data['height']), dtype=data_types[d_type][1])\n int_type = data_types[d_type][2]\n bytesize = data_types[d_type][3]\n i = 0\n for val in range(data['width']*data['height']):\n # array[i] = data_types[d_type][2](in_file)\n array[i] = struct.unpack(int_type, in_file.read(bytesize))[0]\n i = i + 1\n data['data'] = array\n\n log.debug(\"Exiting extract_img_data()\")\n return data",
"def get_filetype(filepath, logger=None):\n # Developer Note\n # Since we have the impending code-freeze, keeping the behavior the same,\n # just changing the implementation.\n # The previous logic did not test for GRIB it would just return 'GRIB'\n # if you couldn't run ncdump on the file.\n # Also note:\n # As John indicated ... there is the case when a grib file\n # may not start with GRIB ... and if you pass the MET command filtetype=GRIB\n # MET will handle it ok ...\n\n # Notes on file format and determining type.\n # https://www.wmo.int/pages/prog/www/WDM/Guides/Guide-binary-2.html\n # https://www.unidata.ucar.edu/software/netcdf/docs/faq.html\n # http: // www.hdfgroup.org / HDF5 / doc / H5.format.html\n\n # Interpreting single byte by byte - so ok to ignore endianess\n # od command:\n # od -An -c -N8 foo.nc\n # od -tx1 -N8 foo.nc\n # GRIB\n # Octet no. IS Content\n # 1-4 'GRIB' (Coded CCITT-ITA No. 5) (ASCII);\n # 5-7 Total length, in octets, of GRIB message(including Sections 0 & 5);\n # 8 Edition number - currently 1\n # NETCDF .. ie. od -An -c -N4 foo.nc which will output\n # C D F 001\n # C D F 002\n # 211 H D F\n # HDF5\n # Magic numbers Hex: 89 48 44 46 0d 0a 1a 0a\n # ASCII: \\211 HDF \\r \\n \\032 \\n\n\n # Below is a reference that may be used in the future to\n # determine grib version.\n # import struct\n # with open (\"foo.grb2\",\"rb\")as binary_file:\n # binary_file.seek(7)\n # one_byte = binary_file.read(1)\n #\n # This would return an integer with value 1 or 2,\n # B option is an unsigned char.\n # struct.unpack('B',one_byte)[0]\n\n # if filepath is set to None, return None to avoid crash\n if filepath == None:\n return None\n\n try:\n # read will return up to 8 bytes, if file is 0 bytes in length,\n # than first_eight_bytes will be the empty string ''.\n # Don't test the file length, just adds more time overhead.\n with open(filepath, \"rb\") as binary_file:\n binary_file.seek(0)\n first_eight_bytes = binary_file.read(8)\n\n # From the first eight bytes of the file, unpack the bytes\n # of the known identifier byte locations, in to a string.\n # Example, if this was a netcdf file than ONLY name_cdf would\n # equal 'CDF' the other variables, name_hdf would be 'DF '\n # name_grid 'CDF '\n name_cdf, name_hdf, name_grib = [None] * 3\n if len(first_eight_bytes) == 8:\n name_cdf = struct.unpack('3s', first_eight_bytes[:3])[0]\n name_hdf = struct.unpack('3s', first_eight_bytes[1:4])[0]\n name_grib = struct.unpack('4s', first_eight_bytes[:4])[0]\n\n # Why not just use a else, instead of elif else if we are going to\n # return GRIB ? It allows for expansion, ie. Maybe we pass in a\n # logger and log the cases we can't determine the type.\n if name_cdf == 'CDF' or name_hdf == 'HDF':\n return \"NETCDF\"\n elif name_grib == 'GRIB':\n return \"GRIB\"\n else:\n # This mimicks previous behavoir, were we at least will always return GRIB.\n # It also handles the case where GRIB was not in the first 4 bytes\n # of a legitimate grib file, see John.\n # logger.info('Can't determine type, returning GRIB\n # as default %s'%filepath)\n return \"GRIB\"\n\n except IOError:\n # Skip the IOError, and keep processing data.\n # ie. 
filepath references a file that does not exist\n # or filepath is a directory.\n return None\n\n # Previous Logic\n # ncdump_exe = config.getexe('NCDUMP')\n #try:\n # result = subprocess.check_output([ncdump_exe, filepath])\n\n #except subprocess.CalledProcessError:\n # return \"GRIB\"\n\n #regex = re.search(\"netcdf\", result)\n #if regex is not None:\n # return \"NETCDF\"\n #else:\n # return None",
"def get_image_format_from_datatext(self, datatext):\n image_format = \"VIRT\"\n temp = re.search('VX_DF_IMAGE_(.+?)\\]', datatext) #Obs. Needed to ecape the [ ]'s\n if temp:\n image_format = temp.group(1)\n return image_format",
"def dims(filespec, verbose=False):\n with open(filespec, \"rb\") as f:\n header = f.read(64) # should be enough for any valid header\n shape, scale = __parse_header(header, filespec, verbose)\n return (shape, scale)",
"def get_sif_info(fileName):\n f = open(fileName, 'rb')\n header = f.read(5000) ##Random, seems-to-be-large-enough number of bytes.\n if header[0:35] != 'Andor Technology Multi-Channel File':\n raise UserWarning(\"Header doesn't start like an Andor .sif\")\n\n \"\"\"Find the description of the image shape:\"\"\"\n firstPixNum = header.find('Pixel number') ## Pixel shape soon after\n shapeData = header[firstPixNum:firstPixNum + 300].splitlines()\n numLine = shapeData[2]\n (startIm, endIm) = numLine.split()[5:7]\n numImages = 1 + int(endIm) - int(startIm)\n coordLine = shapeData[3]\n (left, top, right, bottom) = coordLine.split()[1:5] ##Check this!\n (xPix, yPix) = (1 + int(right) - int(left), 1 + int(top) - int(bottom))\n \"\"\"Determine the offset of the binary data:\"\"\"\n offset = firstPixNum + sum([1 + len(i) for i in shapeData[0:4]])\n f.seek(offset)\n numLines = 0\n numBytes = 0\n while numLines < numImages:\n b = f.read(1)\n numBytes += 1\n if b == '\\n':\n numLines += 1\n offset += numBytes\n f.close()\n return (xPix, yPix, numImages, offset)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
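For orientation, the dtype list built by get_image_info_dtype above is meant to back a numpy structured array (the get_image_info_struct negative does exactly that with np.zeros). Below is a minimal sketch of the resulting layout; the single 'image' entry stands in for the module-level IMAGE_INFO_TYPES constant, which is not shown in this record, so the field names are assumptions.

```python
import numpy as np

# Hypothetical stand-in for IMAGE_INFO_TYPES = ['image']; the real constant
# lives outside this record, so the *_path/*_ext names here are illustrative.
dt = [
    ('image_path', 'U255'),   # path_fmt = 'U%d' % path_len with path_len=255
    ('image_ext', 'i2'),      # default ext_descr when ext_len is None
    ('image_id', 'i8'),
    ('image_flags', 'i8'),
    ('magzp', 'f4'),
    ('scale', 'f4'),
    ('position_offset', 'f8'),
]

info = np.zeros(3, dtype=dt)
info['scale'] = 1.0           # same default that get_image_info_struct applies
info['image_path'][0] = '/data/exp0/image0001.fits'
print(info.dtype.names)
```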
Move files out of subdirectories in the current working directory. | def move_file():
# print("\n".join(os.listdir(filepath)))
# folders = [os.path.join(filepath, fld) for fld in os.listdir(filepath)]
# print(filepath + ":\n " + "\n ".join(folders))
folders = filter(os.path.isdir, os.listdir(u"."))
# print("Sub-folders: ", u"\n".join(folders))
for folder in folders:
files = [os.path.join(folder, fn) for fn in os.listdir(folder)]
files = filter(os.path.isfile, files)
for fn in files:
_, filename = os.path.split(fn)
shutil.move(fn, filename)
assert 0 == len(os.listdir(folder)) | [
"def _move_files(self):\n self._move_directory(self._origin, self._destination)\n for directory in self._filesystem.listdir(self._filesystem.join(self._layout_tests_root, PLATFORM_DIRECTORY)):\n self._move_directory(self._filesystem.join(PLATFORM_DIRECTORY, directory, self._origin),\n self._filesystem.join(PLATFORM_DIRECTORY, directory, self._destination))",
"def move_and_merge_tree(src, dst):\n if not osp.exists(dst):\n shutil.move(src, dst)\n else:\n for fp in os.listdir(src):\n src_fp = osp.join(src, fp)\n dst_fp = osp.join(dst, fp)\n if osp.isdir(src_fp):\n if osp.isdir(dst_fp):\n move_and_merge_tree(src_fp, dst_fp)\n else:\n shutil.move(src_fp, dst_fp)\n elif osp.isfile(src_fp) and \\\n not osp.isfile(dst_fp):\n shutil.move(src_fp, dst_fp)",
"def move_recursively(src, dst, overwrite=False, changed_only=True):\n if os.path.isdir(src):\n movetree(src, dst, overwrite, changed_only)\n else:\n movefile(src, dst, overwrite, changed_only)",
"def move_files(fname_fout, root_dir, dest_dir):\n fname, f_ext = os.path.splitext(fname_fout)\n # Find files which filename of fname_fout\n matches = []\n pattern = fname + '*'\n root_fnames = os.listdir(root_dir)\n for filename in fnmatch.filter(root_fnames, pattern):\n matches.append([filename, os.path.join(root_dir, filename)])\n # Extract new folder name based on fname_fout\n new_folder_name = reshape_fname(fname_fout, ['nairfoil', 'nsetup'])\n dest_dir = os.path.join(dest_dir, new_folder_name)\n # Move files\n for cur_file in matches:\n os.renames(cur_file[1], os.path.join(dest_dir, cur_file[0]))",
"def bulk_move_to_dir(target_files, dest_dir_path):\n if target_files is not None:\n for target_file in target_files:\n move_to_dir(target_file, dest_dir_path)",
"def main():\n os.chdir(\"FilesToSort\")\n files = os.listdir('.')\n for file in files:\n extension_directory = file[file.find('.') + 1:]\n try:\n os.mkdir(extension_directory)\n except FileExistsError:\n pass\n shutil.move(file, extension_directory)",
"def main():\n os.chdir('FilesToSort')\n extension_to_category = {}\n for filename in os.listdir('.'):\n if os.path.isdir(filename):\n continue\n extension = filename.split('.')[-1]\n make_subdirectories(extension, extension_to_category)\n shutil.move(filename, extension_to_category[extension])",
"def movetree(src, dst):\n names = os.listdir(src)\n\n # Create destination dir if it does not exist\n if not os.path.exists(dst):\n os.makedirs(dst)\n errors = []\n\n for name in names:\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n try:\n if os.path.isdir(srcname) and os.path.isdir(dstname):\n movetree(srcname, dstname)\n os.rmdir(srcname)\n else:\n shutil.move(srcname, dstname)\n except (IOError, os.error) as why:\n errors.append((srcname, dstname, str(why)))\n # catch the Error from the recursive movetree so that we can\n # continue with other files\n except Exception as err:\n errors.extend(err.args[0])\n if errors:\n raise Exception(errors)",
"def _move_files(topdatadir, startdate, model_forcing):\n\n curdate = startdate\n subdir = f\"{topdatadir}/cf_{model_forcing}\"\n subdir += f\"_{curdate.year:04d}{curdate.month:02d}\"\n files = glob.glob(f\"{subdir}/*.NC\")\n for filename in files:\n shutil.move(filename, os.path.join(topdatadir, os.path.basename(filename)))\n shutil.rmtree(subdir)",
"def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))",
"def move_all_files_from_subfolders_to_top(folder_path, delete_subfolders=False, copy=False):\n for item in os.listdir(folder_path):\n sub_path = os.path.join(folder_path, item)\n\n if os.path.isdir(sub_path):\n\n for sub_item in os.listdir(sub_path):\n src = os.path.join(sub_path, sub_item)\n target = os.path.join(folder_path, sub_item)\n\n if copy:\n if os.path.isfile(src):\n shutil.copy(src, target)\n else:\n shutil.copytree(src, target)\n else:\n shutil.move(src, target)\n\n if delete_subfolders:\n shutil.rmtree(sub_path)",
"def moveFiles(inputDir, inputFiles):\n\tfor file in inputFiles:\n\t\tlogger.debug('moveFiles: {0}'.format(file))\n\t\tshutil.move(join(inputDir, file), join(inputDir, 'processed', file))\n\n\treturn 0",
"def _sync_directories(from_directory, to_directory):\n if not os.path.exists(to_directory):\n os.mkdir(to_directory)\n for root, dirs, files in os.walk(from_directory):\n to_root = root.replace(from_directory, to_directory)\n for directory in dirs:\n to_child_dir = os.path.join(to_root, directory)\n if not os.path.exists(to_child_dir):\n os.mkdir(to_child_dir)\n for fname in files:\n from_file = os.path.join(root, fname)\n to_file = os.path.join(to_root, fname)\n with open(from_file, 'rb') as a, open(to_file, 'wb') as b:\n b.write(a.read())",
"def move_files(src_path, dst_path):\n\n # Check src_path and dst_path is exists.\n if not os.path.exists(src_path):\n print(\"src_path --> {} --> is not exists.\".format(src_path))\n return\n if not os.path.exists(dst_path):\n print(\"src_path is not exists.\")\n return\n\n # Get all files in src_path\n x = 1\n for root, dirs, files in os.walk(src_path):\n for file in files:\n # absolute src_path file\n src_abs_file = os.path.join(root, file)\n\n # absolute dst_path file\n dst_abs_file = os.path.join(dst_path, file)\n\n if not os.path.exists(dst_abs_file):\n print('{} --> is not in dst_path'.format(dst_abs_file))\n shutil.move(src_abs_file, dst_abs_file)\n else:\n rename_dst_abs_file = os.path.splitext(\n dst_abs_file)[0] + '_(' + str(x) + ')_' + os.path.splitext(\n dst_abs_file)[1]\n x += 1\n print(rename_dst_abs_file)\n shutil.move(src_abs_file, rename_dst_abs_file)",
"def move_word_files(self):\n destination_file = os.getcwd() + \"\\word_files\"\n for file in os.listdir():\n if file.endswith(\".docx\"):\n try:\n shutil.move(file, destination_file)\n except shutil.Error:\n pass\n for file in os.listdir():\n if file.endswith('.docx'):\n os.unlink(file)",
"def move_files(from_dir, to_dir, keyword):\n \n if not os.path.exists(to_dir):\n os.mkdir(to_dir)\n \n if keyword == None:\n # If keyword is left empty, from_dir is considered a list of files.\n to_move = from_dir\n else:\n to_move = glob.glob(os.path.join(from_dir, '*' + keyword + '*'))\n \n n_moved = 0 \n for f in to_move:\n if os.path.isfile(f):\n shutil.move(f, to_dir)\n n_moved += 1\n \n print \"Moved %i files to %s.\" % (n_moved, to_dir)",
"def move_files(src, dst, filenames):\n for filename in filenames:\n os.rename(os.path.join(src, filename), os.path.join(dst, filename))",
"def test_verifyMoveFiles(self):\n\n target = 'testes2/'\n self.createDirectorySource()\n self.createDirectoryTarget()\n self.createFiles()\n\n for file in self.files:\n m = re.search(self.regex, file)\n if m != None:\n command = \"%s%s\" %('testes/', file)\n shutil.move(command, target)\n\n after = os.listdir('testes/')\n moved = os.listdir('testes2/')\n\n self.assertEqual(len(after), len(self.files)-2)\n self.assertEqual(len(moved), 2)\n\n self.cleanSource()\n self.cleanTarget()",
"def mirror_directory_tree_with_files_loop(self, in_dirpath, out_dirpath, only_include_filetypes, include_file_suffix, avoid_files_with):\n for i in os.listdir(in_dirpath):\n if i[0] == '.' or i[:6] == 'README':\n continue\n elif os.path.isdir(in_dirpath + i):\n if not os.path.exists(out_dirpath + i):\n os.makedirs(out_dirpath + i)\n self.mirror_directory_tree_with_files_loop(self, in_dirpath + i + '/', out_dirpath + i + '/', only_include_filetypes, include_file_suffix, avoid_files_with)\n elif os.path.isfile(in_dirpath + i):\n if avoid_files_with:\n if avoid_files_with in '.'.join(i.split('.')[:-1]):\n continue\n if only_include_filetypes:\n suffix = i.split('.')[-1]\n if suffix in only_include_filetypes:\n if include_file_suffix:\n filename = i\n else:\n filename = '.'.join(i.split('.')[:-1])\n self.files_containing_filetype.update([in_dirpath + i])\n self.mirrored_filepaths.update([out_dirpath + filename])\n self.mirrored_directory_leaves.update([out_dirpath])\n else:\n if include_file_suffix or not '.' in i:\n filename = i\n else:\n filename = '.'.join(i.split('.')[:-1])\n self.files_containing_filetype.update([in_dirpath + i])\n self.mirrored_filepaths.update([out_dirpath + filename])\n self.mirrored_directory_leaves.update([out_dirpath])\n else:\n print dirpath + i, 'does not exist'\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
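As a usage-style sketch of the flattening idea in move_file above, the variant below can be pointed at an arbitrary directory instead of only the current working one; the function name and the root parameter are illustrative additions, not part of the dataset entry.

```python
import os
import shutil

def flatten_subdirectories(root="."):
    """Move every file sitting one level below `root` up into `root` itself."""
    for entry in os.listdir(root):
        folder = os.path.join(root, entry)
        if not os.path.isdir(folder):
            continue
        for name in os.listdir(folder):
            source = os.path.join(folder, name)
            if os.path.isfile(source):
                # Like the original, filename collisions are not handled specially.
                shutil.move(source, os.path.join(root, name))

if __name__ == "__main__":
    flatten_subdirectories(".")
```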
Find duplications in submitted homework. | def find_duplication(homework):
re_id = re.compile(r'(?P<stuid>[0-9]{10,11})')
dup_check = dict()
with open(homework, 'r') as data:
lines = data.readlines()
for ln in lines:
dt = ln.split()
csum, right = dt[0], dt[1]
if csum not in dup_check:
dup_check[csum] = list()
m = re_id.search(right)
if m is not None:
stu_id = m.group('stuid')
dup_check[csum].append(stu_id)
    # filter() hands each (checksum, ids) pair to the callable as a single tuple, so unpack it by index
    dup_check = filter(lambda item: len(item[1]) > 1, dup_check.items())
dup_check = [(key, sorted(val)) for key, val in dup_check]
return dup_check | [
"def _remove_dupes(recs, input, bad_movies, hist_list=[], feedback_list=[]):\n all_rated = input + bad_movies + hist_list + feedback_list\n nonlocal dupes\n dupes = [x for x in recs if x[0] in input]\n return [x for x in recs if x[0] not in all_rated]",
"def find_duplicates():\n return AppServer.service.find_duplicated_files()",
"def FilterDuplicates1(thisSearch):\n logging.info('Processing duplicates\\n' + '='*150)\n #Arrange the matches in a dict that has the matched word's head's database id as its key\n matchitems = sorted(thisSearch.matches.items())\n mheadids = dict()\n mheadids = defaultdict(list)\n for key, matches in matchitems:\n for match in matches:\n mhead = match.matchedword.head\n if not match.postprocessed:\n #If this match has not yet been processed\n try:\n while thisSearch.queried_table == 'ru_conll' and (match.matchedsentence.words[mhead].pos != 'V' and match.matchedsentence.words[mhead].deprel != 'ROOT'):\n #For Russian cases roll back to the main verb\n headword = match.matchedsentence.words[mhead]\n mhead = headword.head\n mheadids[match.matchedsentence.words[mhead].dbid].append(match)\n except KeyError:\n logging.info('Key error with sentence number {}'.format(match.matchedsentence.sentence_id))\n #Just for counting:\n total = 0\n for mheadid, matchlist in mheadids.items():\n if len(matchlist)>1:\n total += 1\n processed = 0\n #Iterate through the dict and process all the instances where one headid has multiple matches\n for mheadid, matchlist in mheadids.items():\n if len(matchlist)>1:\n processed += 1\n thisPair = PotentialDuplicatePair(matchlist)\n if not thisPair.CheckExistingRules():\n #If no predefined rules exist\n #Clear the output for conveniance\n os.system('cls' if os.name == 'nt' else 'clear')\n print('Processing duplicate no {}/{}'.format(processed,total))\n cont = thisPair.select()\n if not cont:\n break\n if thisPair.rejected != 'n':\n #If something was rejected, ask about a rule:\n createrule = yesnomenu()\n createrule.prompt_valid('Create a rule?')\n if createrule.answer =='y':\n thisPair.CreateRejectionRule()",
"def duplicates():\n dupes = ResultForm.objects.values(\n 'center', 'ballot', 'station_number').annotate(\n Count('id')).order_by().filter(id__count__gt=1).filter(\n center__isnull=False, ballot__isnull=False,\n station_number__isnull=False).exclude(form_state=FormState.UNSUBMITTED)\n\n pks = flatten([map(lambda x: x['id'], ResultForm.objects.filter(\n center=item['center'], ballot=item['ballot'],\n station_number=item['station_number']).values('id'))\n for item in dupes])\n\n return ResultForm.objects.filter(pk__in=pks)",
"def handle_duplicates(self, database):\n number_of_duplicates = 0\n number_of_merged = 0\n if not database.session:\n logger.error(\"no database session\")\n return (number_of_duplicates, number_of_merged)\n\n # return if this workout already has been checked\n if self.is_duplicate_with or self.manual_check_required_with:\n logger.debug(\"dup check - no check, since this workout is marked: {}\".format(self))\n return (number_of_duplicates, number_of_merged)\n\n # return if this workout does not have start_time set, since the following checks are based on it\n if not self.start_time or not self.duration_sec:\n return (number_of_duplicates, number_of_merged)\n\n # potential duplicate if time is overlapping\n # this workout |-----------------|\n # 1st potential duplicate in db |-----------------|\n # 2nd potential duplicate in db |------------------------|\n # 3rd potential duplicate in db |----------------|\n # 4th potential duplicate in db |---------|\n # (Remark to line 2 of 1st filter: needed to use database functions, \n # because modifiers like timedelta do not work with sqlalchemy sql attributes)\n # TODO handle timezones (needed for sqlite strftime)\n duplicates = database.session.query(Workout)\\\n .filter(or_(and_(Workout.start_time < self.start_time,\n func.strftime('%s', Workout.start_time, 'utc') + Workout.duration_sec >= self.start_time.timestamp()),\n and_(Workout.start_time >= self.start_time,\n Workout.start_time < (self.start_time + datetime.timedelta(seconds=int(self.duration_sec))))))\\\n .filter(Workout.is_duplicate_with == None)\\\n .filter(Workout.manual_check_required_with == None)\\\n .all()\n\n if len(duplicates) <= 1: \n return (number_of_duplicates, number_of_merged)\n\n # find overlapping workouts of different sports -> set manual_check_required_with\n for duplicate in duplicates:\n if duplicate.sport_id != self.sport_id:\n self.manual_check_required_with = duplicate.id\n logger.debug(\"dup check - workout marked to be checked: {}\".format(duplicate))\n duplicates.remove(duplicate)\n if len(duplicates) <= 1: \n return (number_of_duplicates, number_of_merged)\n\n # find overlapping workouts of same sports (they are duplicate workouts) -> now find the leading workout\n leading_workout = None\n # Step 1: if one of the duplicates is a previously merged one, use it as the leading workout\n for duplicate in duplicates:\n if duplicate.source and duplicate.source == \"MERGED WORKOUT\":\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 1: {}\".format(leading_workout))\n break\n # Step 2: else if one of the duplicates is from Zwift, prefer it as the leading workout\n if not leading_workout:\n for duplicate in duplicates:\n if duplicate.name and \"Zwift\" in duplicate.name:\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 2: {}\".format(leading_workout))\n break\n # Step 3: else if one of the duplicates is a Garmin import, prefer it as the leading workout\n if not leading_workout:\n for duplicate in duplicates:\n if duplicate.source and \"Garmin\" in duplicate.source:\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 3: {}\".format(leading_workout))\n break\n # Step 4: else use this workout as the leading workout\n if not leading_workout:\n leading_workout = self\n logger.debug(\"Found leading workout in step 4: {}\".format(leading_workout))\n\n # create a new workout that will be treated as the leading one. 
Mark the duplicates \n if leading_workout.source == \"MERGED WORKOUT\":\n merged_workout = leading_workout\n else:\n merged_workout = Workout(source=\"MERGED WORKOUT\", external_id=datetime.datetime.now().timestamp())\n number_of_merged += 1\n merged_workout._merge_attributes(leading_workout)\n logger.debug(\"dup check - merged workout with leading: {}\".format(merged_workout))\n merged_workout.add(database)\n leading_workout.is_duplicate_with = merged_workout.id\n number_of_duplicates += 1\n\n for duplicate in duplicates:\n if duplicate is leading_workout:\n # already merged above\n continue\n if duplicate.is_duplicate_with == merged_workout.id:\n # already merged\n continue\n merged_workout._merge_attributes(duplicate)\n logger.debug(\"dup check - merged workout duplicate: {}\".format(merged_workout))\n duplicate.is_duplicate_with = merged_workout.id\n number_of_duplicates += 1\n logger.debug(\"dup check - duplicate workout marked: {}\".format(duplicate))\n\n return (number_of_duplicates, number_of_merged)",
"def filter_dups(saved_home, dups_info_home):\n orig_context_file = open(os.path.join(saved_home, 'data_for_corenlp', 'kp20k_training_context_for_corenlp.txt'),\n encoding='utf-8')\n context_lines = orig_context_file.readlines()\n orig_allkeys_file = open(os.path.join(saved_home, 'data_for_corenlp', 'kp20k_training_keyword_for_corenlp.txt'),\n encoding='utf-8')\n allkeys_lines = orig_allkeys_file.readlines()\n assert len(context_lines) == len(allkeys_lines)\n\n # filter out the duplicates in the validation and the testing datasets and the kp20k training dataset itself\n dups_info_datasets = ['kp20k_training', 'kp20k_validation', 'kp20k_testing',\n 'inspec_testing', 'krapivin_testing',\n 'nus_testing', 'semeval_testing']\n total_filtered_idx_set = set()\n for dataset in dups_info_datasets:\n filtered_idx_set = set()\n dups_info_file = open(\n os.path.join(dups_info_home, '{}_context_nstpws_dups_w_kp20k_training.txt'.format(dataset)), encoding='utf-8')\n for line in dups_info_file:\n line = line.strip()\n # inspec_testing_48 kp20k_training_433051 jc_sc:0.7368; affine invariants of convex polygons | affine invariants of convex polygons\n dups, titles = line.split(';')\n src_dup, filtered_dup, _ = dups.split()\n src_idx = int(src_dup.strip().split('_')[-1])\n filtered_idx = int(filtered_dup.strip().split('_')[-1])\n if dataset != 'kp20k_training':\n filtered_idx_set.add(filtered_idx)\n else:\n if src_idx not in filtered_idx_set:\n filtered_idx_set.add(filtered_idx)\n total_filtered_idx_set = total_filtered_idx_set.union(filtered_idx_set)\n print('Num of filtered kp20k training data: {}'.format(len(total_filtered_idx_set)))\n\n # also filter out the invalid data samples\n print('Finding the invalid data samples in the original kp20k training ...')\n for corpus_idx in tqdm(range(len(context_lines))):\n if context_lines[corpus_idx].strip().split() == [''] or allkeys_lines[corpus_idx].strip().split(' ; ') == ['']:\n total_filtered_idx_set.add(corpus_idx)\n print('Num of filtered kp20k training data: {}'.format(len(total_filtered_idx_set)))\n\n total_filtered_idxes = sorted(list(total_filtered_idx_set))\n for filter_idx in total_filtered_idxes:\n context_lines[filter_idx] = '\\n'\n allkeys_lines[filter_idx] = '\\n'\n\n filtered_context_file = open(os.path.join(saved_home, 'data_for_corenlp',\n 'kp20k_training_context_for_corenlp_filtered.txt'),\n 'w', encoding='utf-8')\n filtered_context_file.writelines(context_lines)\n\n filtered_allkeys_file = open(os.path.join(saved_home, 'data_for_corenlp',\n 'kp20k_training_keyword_for_corenlp_filtered.txt'),\n 'w', encoding='utf-8')\n filtered_allkeys_file.writelines(allkeys_lines)\n\n orig_context_file = open(os.path.join(saved_home, 'data_for_corenlp',\n 'kp20k_training_filtered_for_corenlp_idxes.txt'),\n 'w', encoding='utf-8')\n orig_context_file.write(' '.join([str(idx) for idx in total_filtered_idxes]) + '\\n')\n orig_context_file.write(str(len(total_filtered_idxes)) + '\\n')",
"def find_duplicate(student_list):\r\n place_holder = student_info('null', 'null', '0', '0')\r\n current = place_holder\r\n dupe = []\r\n final = []\r\n for student in student_list:\r\n previous = current\r\n current = student\r\n if current.first == previous.first:\r\n if previous in final:\r\n dupe.append(final.pop())\r\n dupe.append(student)\r\n elif current.first != previous.first:\r\n if len(dupe) > 1:\r\n dupe.sort(key=lambda x: x[1])\r\n for student_dupe in dupe:\r\n final.append(student_dupe)\r\n final.append(student)\r\n dupe = []\r\n else:\r\n final.append(student)\r\n if len(dupe) > 1:\r\n dupe.sort(key=lambda x: x[1])\r\n for student_dupe in dupe:\r\n final.append(student_dupe)\r\n for student_final in final:\r\n print(student_format(student_final))",
"def replicates_for_assay(assay):\n reps = set()\n for tissue in LIBRARIES[assay]:\n reps.update(LIBRARIES[assay][tissue])\n return sorted(list(reps))",
"def find_duplicates(lst):\n \"*** YOUR CODE HERE ***\"\n return len( set(lst) ) != len(lst)",
"def check_for_dups(publication_list: List[dict]):\n checked = []\n duplicates = []\n for pub in publication_list:\n if pub[\"title\"].lower() not in [p[\"title\"].lower() for p in checked]:\n checked.append(pub)\n else:\n for ind, p in enumerate(checked):\n if p[\"title\"].lower() == pub[\"title\"].lower():\n duplicates.append(pub[\"title\"])\n checked[ind] = pick_best(p, pub)\n break\n return checked, duplicates",
"def list_dups(exproot, **kwargs):\n seen_args = []\n seen_names = []\n for jobname, args, results in load_all(exproot):\n if args in seen_args:\n print jobname, 'is dup of', seen_names[seen_args.index(args)]\n elif args != None:\n seen_args.append(args)\n seen_names.append(jobname)",
"def duplicates_marked_reciprocally():\n ids = FRAMEWORKS_DF['CURATED-COFs ID'].str\n messages = []\n\n for _index, row in FRAMEWORKS_DF.iterrows():\n if row['Duplicate found'] != 'none':\n original_id = row['CURATED-COFs ID']\n duplicate_id = row['Duplicate found']\n duplicate_row = FRAMEWORKS_DF.loc[FRAMEWORKS_DF['CURATED-COFs ID'] == duplicate_id ]\n if not len(duplicate_row) == 1:\n messages.append(f'Found row without reciprocal duplicate mark:\\n{row}')\n\n duplicate_row_original_id = duplicate_row['Duplicate found'].values[0]\n if not duplicate_row['Duplicate found'].values[0] == original_id:\n messages.append(f'Duplicate row lists ID {duplicate_row_original_id}, expected {original_id}')\n\n if messages:\n print('\\n'.join(messages))\n sys.exit(1)\n\n print('Rows marked as duplicates go both ways.')",
"def test_duplicated_gaitid(self):\n idaa_index = 6\n\n upload_program = program.ProgramUpload(idaa_program=self.idaa_json['value'][idaa_index]['fields'],\n msr_country_codes_list=msr_country_codes_list, msr_gaitid_list=msr_gaitid_list, duplicated_gaitids=self.duplicated_gaitids\n )\n\n self.assertFalse(upload_program.is_valid())\n self.assertTrue(upload_program.has_discrepancy('duplicate_gaitid'))",
"def test_identify_duplicates_2(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = \"none\"\n ticket1.type = \"replace\"\n ticket1.phage_id = \"none\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = \"none\"\n ticket2.type = \"replace\"\n ticket2.phage_id = \"none\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)",
"def check_duplicates():\n print \"Building data set...\\n\"\n\n rows = db.links.find()\n seen = set()\n count = 0\n for row in rows:\n value = hashlib.md5(row['body'].encode('utf8')).hexdigest()\n if value in seen:\n count += 1\n print row['category'], row['_id']\n # db.links.remove({'_id': row['_id']})\n else:\n seen.add(value)\n print count, 'duplicate(s)'\n print \"-------------------\\n\"",
"def duplicates(deleteFlag=False,cnst='1'):\n output = db.query(['fwid','strjob'],cnst) # list of (fwid,strjob) pairs\n rptDict={} # dictionary for repeat values (key = first fwid, value = list of duplicates)\n for fwid,strjob in output:\n for f,s in output: # double-FOR loop\n if f is None: print 'NONE FWID??? ',f,s # hopefully this isn't flagged\n if strjob == s and f!=fwid: # condition for duplicate job\n if fwid not in list(itertools.chain.from_iterable(rptDict.values())): \n if fwid in rptDict.keys(): rptDict[fwid].append(f) # add to the list\n else: rptDict[fwid] = [f] # create a key,value pair\n print 'FWIDs with equal strjob entries: \\n',abbreviateDict(rptDict) # summarize results\n if deleteFlag:\n delfws = list(itertools.chain.from_iterable(rptDict.values()))\n if ask('Are you sure you want to delete %d duplicates?'%len(delfws)):\n for f in delfws: delete('fwid = %d'%f,False)",
"def _FindDuplicates(self):\n findings = []\n for subset in self._complete_field_sets_oi:\n if not subset:\n continue\n typenames = self._complete_field_sets_oi[subset]\n if len(typenames) < 2:\n continue\n\n for typename in typenames:\n entity_type = self._GetTypeByName(typename)\n\n # Filter duplicates where the two types inherit from one another\n other_typenames = typenames.difference([typename])\n not_related = {}\n qparents_by_child = {}\n for other_name in other_typenames:\n if other_name not in qparents_by_child:\n other_type = self._GetTypeByName(other_name)\n qparents_by_child[other_name] = other_type.parent_names\n if (typename not in qparents_by_child[other_name] and\n other_name not in entity_type.parent_names):\n\n optionality_compatible = True\n optionality_changes = 0\n for field in entity_type.GetAllFields():\n parent_field = entity_type.GetAllFields()[field]\n other_field = other_type.GetAllFields()[field]\n if not parent_field.optional and other_field.optional:\n optionality_compatible = False\n break\n if parent_field != other_field:\n optionality_changes += 1\n if not optionality_compatible:\n continue\n not_related[other_name] = optionality_changes\n\n if not not_related:\n continue\n key_list = list(not_related.keys())\n key_list.append(typename)\n finding = findings_lib.DuplicateExpandedFieldSetsWarning(\n entity_type, not_related, frozenset(key_list))\n entity_type.AddFinding(finding)\n findings.append(finding)\n return findings",
"def __delete_duplicates(self):\n log = logging.getLogger()\n log.debug(\"\\n---> Duplicate check <---\")\n\n chromosomes = list(set(self.chromosomes))\n diff = self.size - len(chromosomes)\n\n if diff > 0:\n log.debug(\"---> Duplicate(s) found! <---\")\n for i in range(diff):\n chromosomes.append(\n Chromosome(self.__generate_random_gene_sequence(),\n self.environment))\n else:\n log.debug(\"---> No duplicates found! <---\")\n\n self.chromosomes = chromosomes",
"def _check_duplicates(self):\n\n duplicates = set()\n for name in self.genes:\n if len(self.genes[name]) > 1:\n duplicates.add(name)\n\n for name in sorted(duplicates):\n\n original_name = self.genes[name][0].name\n instances = ['(\"%s\"/%s)' % (\n x.microplate_name, x.well_name) for x in self.genes[name]\n ]\n\n message = 'Warning: duplicate gene \"%s\" at %s'\n print >> sys.stderr, message % (original_name, ', '.join(instances))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
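The grouping step in find_duplication above can be exercised on in-memory lines without touching a file; the sample lines below are invented to match the assumed '<checksum> <filename-containing-student-id>' format, so they are illustrative only.

```python
import re
from collections import defaultdict

RE_ID = re.compile(r'(?P<stuid>[0-9]{10,11})')

def find_duplication_from_lines(lines):
    """Group 10-11 digit student IDs by checksum, keeping checksums seen more than once."""
    groups = defaultdict(list)
    for line in lines:
        checksum, rest = line.split()[:2]
        match = RE_ID.search(rest)
        if match:
            groups[checksum].append(match.group('stuid'))
    return [(csum, sorted(ids)) for csum, ids in groups.items() if len(ids) > 1]

sample = [
    "d41d8cd98f hw1_2015123456.py",
    "d41d8cd98f hw1_2015654321.py",
    "900150983c hw1_2016111111.py",
]
print(find_duplication_from_lines(sample))
# -> [('d41d8cd98f', ['2015123456', '2015654321'])]
```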
Display the duplication check results. | def display_dup(dup_result):
lines = [k + ": " + ", ".join(v) for k, v in dup_result]
return lines | [
"def show_duplicates(l,name):\n\tdups = [x for x,y in collections.Counter(l).items() if y > 1]\n\tif len(dups) > 0:\n\t\tprint str(name),': duplicate combination(s) found for the following:'\n\t\tfor d in dups:\n\t\t\tprint_red('\\t',str(d))\n\t\treturn False\n\treturn True",
"def printOverviewDuplicates(self):\n\n if self._collHpssDuplicates.count() == 0:\n return\n\n print('\\n==---------------------------------------------------------==')\n print('Collection:', self._collHpssDuplicates.name)\n print('==---------------------------------------------------------==')\n\n self._printOverviewLevelPicoDst(self._collHpssDuplicates, 1, {})",
"def printDistinct(self):\n\n print('\\n==---------------------------------------------------------==')\n print('Collection:', self._collHpssFiles.name)\n print('==---------------------------------------------------------==')\n\n self._printListOfUniqEntries(self._collHpssFiles, 'fileType')\n\n print('\\n==---------------------------------------------------------==')\n print('Collection:', self._collHpssPicoDsts.name)\n print('==---------------------------------------------------------==')\n\n for key, value in self._fields.items():\n self._printListOfUniqEntries(self._collHpssPicoDsts, value)\n\n for key, value in self._fieldsExtra.items():\n if key == 1:\n print(' Unique Entries in field:', value)\n print (' ', sorted(self._collHpssPicoDsts.distinct(value)))\n elif key >= 3:\n self._printListOfUniqEntries(self._collHpssPicoDsts, value)\n\n if self._collHpssDuplicates.count() == 0:\n return\n\n print('\\n==---------------------------------------------------------==')\n print('Collection:', self._collHpssDuplicates.name)\n print('==---------------------------------------------------------==')\n\n for key, value in self._fields.items():\n self._printListOfUniqEntries(self._collHpssDuplicates, value)\n\n for key, value in self._fieldsExtra.items():\n if key == 1:\n print(' Unique Entries in field:', value)\n print (' ', sorted(self._collHpssDuplicates.distinct(value)))\n elif key >= 3:\n self._printListOfUniqEntries(self._collHpssDuplicates, value)",
"def _check_duplicates(self):\n\n duplicates = set()\n for name in self.genes:\n if len(self.genes[name]) > 1:\n duplicates.add(name)\n\n for name in sorted(duplicates):\n\n original_name = self.genes[name][0].name\n instances = ['(\"%s\"/%s)' % (\n x.microplate_name, x.well_name) for x in self.genes[name]\n ]\n\n message = 'Warning: duplicate gene \"%s\" at %s'\n print >> sys.stderr, message % (original_name, ', '.join(instances))",
"def dupe_list():\r\n randlist = []\r\n i = 0\r\n prev = 0\r\n duplicates = 0\r\n duplicatelist = []\r\n while i < 30:\r\n randlist.append(random.randint(0, 50))\r\n i += 1\r\n\r\n for num in randlist:\r\n if num == prev:\r\n print(\"%d is a duplicate of the previous number.\" % num)\r\n duplicatelist.append(num)\r\n duplicates += 1\r\n prev = num\r\n\r\n if duplicates > 1:\r\n print(\"There are %d duplicates in the list.\" % duplicates)\r\n elif duplicates == 1:\r\n print(\"There is %d duplicate in the list.\" % duplicates)\r\n else:\r\n print(\"There are no duplicates in the list.\")\r\n\r\n if duplicates > 0:\r\n for num in duplicatelist:\r\n print(\"%d \" % num, end='')",
"def test_display_name(self):\r\n def verify_name(source_usage_key, parent_usage_key, expected_name, display_name=None):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key, display_name)\r\n duplicated_item = self.get_item_from_modulestore(usage_key, draft=True)\r\n self.assertEqual(duplicated_item.display_name, expected_name)\r\n return usage_key\r\n\r\n # Display name comes from template.\r\n dupe_usage_key = verify_name(self.problem_usage_key, self.seq_usage_key, \"Duplicate of 'Multiple Choice'\")\r\n # Test dupe of dupe.\r\n verify_name(dupe_usage_key, self.seq_usage_key, \"Duplicate of 'Duplicate of 'Multiple Choice''\")\r\n\r\n # Uses default display_name of 'Text' from HTML component.\r\n verify_name(self.html_usage_key, self.seq_usage_key, \"Duplicate of 'Text'\")\r\n\r\n # The sequence does not have a display_name set, so category is shown.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"Duplicate of sequential\")\r\n\r\n # Now send a custom display name for the duplicate.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"customized name\", display_name=\"customized name\")",
"def print_num_uniques(self):\n print 'There are', self.num_uniques, 'unique cabs in this dataset.'",
"def _render_results_scan_summary(self):\n\n core.add_text(\n 'Scan Summary',\n color=self._control_text_color,\n parent=self._window_name)\n\n core.add_text(\n 'Number of images scanned: ',\n parent=self._window_name)\n\n core.add_same_line(parent=self._window_name)\n\n core.add_text(\n name='number_of_scanned_images_text',\n source=NUMBER_OF_SCANNED_IMAGES,\n parent=self._window_name)\n\n core.add_text(\n 'Number duplicate image sets: ',\n parent=self._window_name)\n\n core.add_same_line(parent=self._window_name)\n\n core.add_text(\n str(len(self._duplicates_list)),\n parent=self._window_name)\n\n core.add_text('', parent=self._window_name)",
"def check_duplicates():\n print \"Building data set...\\n\"\n\n rows = db.links.find()\n seen = set()\n count = 0\n for row in rows:\n value = hashlib.md5(row['body'].encode('utf8')).hexdigest()\n if value in seen:\n count += 1\n print row['category'], row['_id']\n # db.links.remove({'_id': row['_id']})\n else:\n seen.add(value)\n print count, 'duplicate(s)'\n print \"-------------------\\n\"",
"def print_duplicates(md):\n for digest,paths in md.iteritems():\n for p in paths:\n print digest, p\n # print blank line between groups\n print \"\"",
"def print_results(self):\n pass",
"def show_fixture_duplicates(config):\n from _pytest.main import wrap_session\n return wrap_session(config, _show_fixture_duplicates_main)",
"def _print_results(self, show_passed):\n\n # Spacing from log messages above\n console.print(\"\")\n\n log.debug(\"Printing final results\")\n\n # Helper function to format test links nicely\n @group()\n def format_result(test_results):\n \"\"\"\n Given an list of error message IDs and the message texts, return a nicely formatted\n string for the terminal with appropriate ASCII colours.\n \"\"\"\n for eid, msg in test_results:\n tools_version = __version__\n if \"dev\" in __version__:\n tools_version = \"latest\"\n yield Markdown(\n f\"[{eid}](https://nf-co.re/tools/docs/{tools_version}/pipeline_lint_tests/{eid}.html): {msg}\"\n )\n\n # Table of passed tests\n if len(self.passed) > 0 and show_passed:\n console.print(\n rich.panel.Panel(\n format_result(self.passed),\n title=rf\"[bold][✔] {len(self.passed)} Pipeline Test{_s(self.passed)} Passed\",\n title_align=\"left\",\n style=\"green\",\n padding=1,\n )\n )\n\n # Table of fixed tests\n if len(self.fixed) > 0:\n console.print(\n rich.panel.Panel(\n format_result(self.fixed),\n title=rf\"[bold][?] {len(self.fixed)} Pipeline Test{_s(self.fixed)} Fixed\",\n title_align=\"left\",\n style=\"bright_blue\",\n padding=1,\n )\n )\n\n # Table of ignored tests\n if len(self.ignored) > 0:\n console.print(\n rich.panel.Panel(\n format_result(self.ignored),\n title=rf\"[bold][?] {len(self.ignored)} Pipeline Test{_s(self.ignored)} Ignored\",\n title_align=\"left\",\n style=\"grey58\",\n padding=1,\n )\n )\n\n # Table of warning tests\n if len(self.warned) > 0:\n console.print(\n rich.panel.Panel(\n format_result(self.warned),\n title=rf\"[bold][!] {len(self.warned)} Pipeline Test Warning{_s(self.warned)}\",\n title_align=\"left\",\n style=\"yellow\",\n padding=1,\n )\n )\n\n # Table of failing tests\n if len(self.failed) > 0:\n console.print(\n rich.panel.Panel(\n format_result(self.failed),\n title=rf\"[bold][✗] {len(self.failed)} Pipeline Test{_s(self.failed)} Failed\",\n title_align=\"left\",\n style=\"red\",\n padding=1,\n )\n )",
"def check(self):\n if not self.session:\n print(\"no database\")\n\n number_of_checked_workouts = 0\n number_of_merged_workouts = 0\n number_of_duplicate_workouts = 0\n workouts = self.session.query(Workout).all()\n for workout in workouts:\n number_of_checked_workouts += 1\n if workout.is_duplicate_with:\n number_of_duplicate_workouts += 1\n else:\n (a, b) = workout.handle_duplicates(self)\n number_of_duplicate_workouts += a\n number_of_merged_workouts += b\n logger.info('{} workouts checked, {} of them were duplicate, created {} merged workouts'\\\n .format(number_of_checked_workouts,\n number_of_duplicate_workouts,\n number_of_merged_workouts,))",
"def showClusters(self,clusterOfFiles,batchSize=3):\n #groupCounter keeps track of how many clusters of duplicate files has been printed\n clusterCounter=0\n for acluster in clusterOfFiles:\n #print a cluster/group of duplicate files\n print(\"Duplicate group {0}\".format(clusterCounter+1))\n print (\"All of these files have the same content:\")\n for afile in acluster:\n print(afile)\n \n #increase the groupCounter by 1 as one group has been printed\n clusterCounter+=1\n if clusterCounter%batchSize==0:\n raw_input(\"Press any key for more duplicates\")",
"def duplicates():\n dupes = ResultForm.objects.values(\n 'center', 'ballot', 'station_number').annotate(\n Count('id')).order_by().filter(id__count__gt=1).filter(\n center__isnull=False, ballot__isnull=False,\n station_number__isnull=False).exclude(form_state=FormState.UNSUBMITTED)\n\n pks = flatten([map(lambda x: x['id'], ResultForm.objects.filter(\n center=item['center'], ballot=item['ballot'],\n station_number=item['station_number']).values('id'))\n for item in dupes])\n\n return ResultForm.objects.filter(pk__in=pks)",
"def print_audit_postcode_results():\r\n\r\n missing_postcodes = round_rock_postcodes.difference(checked_postcode)\r\n if not missing_postcodes:\r\n print(\"\\nThere was at least one instance of each Round Rock postcode \"\\\r\n \"found in the data.\\n\")\r\n else:\r\n print(\"\\nThe following are Round Rock postcodes not found in the data.\\n\")\r\n print(missing_postcodes)\r\n if not_post_code:\r\n print(\"\\nThe following postcodes are not identified as Round Rock postcodes and need review.\\n\")\r\n print(not_post_code)\r\n print('\\n')",
"def _show_fixture_duplicates_main(config, session):\n session.perform_collect()\n curdir = py.path.local()\n\n fm = session._fixturemanager\n\n fixture_name = config.option.fixture_name\n available = defaultdict(list)\n arg2fixturedefs = ([fixture_name]\n if fixture_name and fixture_name in fm._arg2fixturedefs\n else fm._arg2fixturedefs)\n for item in session.items:\n for argname in arg2fixturedefs:\n fixturedefs = fm.getfixturedefs(argname, item.nodeid)\n assert fixturedefs is not None\n if not fixturedefs:\n continue\n\n for fixturedef in fixturedefs:\n loc = getlocation(fixturedef.func, curdir)\n\n fixture = (\n len(fixturedef.baseid),\n fixturedef.func.__module__,\n curdir.bestrelpath(loc),\n fixturedef\n )\n if fixture[2] not in [f[2] for f in available[argname]]:\n available[argname].append(fixture)\n\n if fixture_name:\n print_duplicates(fixture_name, available[fixture_name], None)\n else:\n available = sorted([(key, items) for key, items in available.items()], key=lambda key: key[0])\n\n previous_argname = None\n for argname, fixtures in available:\n print_duplicates(argname, fixtures, previous_argname)\n previous_argname = argname",
"def check_all(self):\n groups = self.get_all()\n merged_output = []\n for group in groups:\n output = self.test_group_duplicates(group.id)\n if output and self.json:\n merged_output += output\n elif output and not self.json:\n print \"Anti-affinity rules violated in Server Group:\",\\\n group.id\n print_table(output)\n if self.json and merged_output:\n print json.dumps(merged_output)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
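A short end-to-end sketch pairing display_dup above with a duplicate-check result of the same (checksum, sorted_ids) shape; the tuples are invented for illustration.

```python
def display_dup(dup_result):
    return [k + ": " + ", ".join(v) for k, v in dup_result]

# Hypothetical result in the shape produced by find_duplication.
dup_result = [
    ("d41d8cd98f", ["2015123456", "2015654321"]),
    ("900150983c", ["2016111111", "2017222222"]),
]

for line in display_dup(dup_result):
    print(line)
# d41d8cd98f: 2015123456, 2015654321
# 900150983c: 2016111111, 2017222222
```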
Create a response model to pass to the presenter | def _create_response_model(self, data):
pass | [
"def get_response_model_ctor(self):\n return self._response_model_ctor",
"def create_json_from_model(self):\n json = {\n \"enableAutoReply\": self.enable_auto_reply,\n \"responseSubject\": self.response_subject,\n \"responseBodyPlainText\": self.response_body_plain_text,\n \"responseBodyHtml\": self.response_body_html,\n \"restrictToContacts\": self.restrict_to_contacts,\n \"restrictToDomain\": self.restrict_to_domain,\n \"startTime\": self.start_time,\n \"endTime\": self.end_time\n }\n return json",
"def _create_response(self) -> str:",
"def create_flask_response(self):\n pass",
"def create_response(self, ):\n\t\tbuttons = None\n\t\tlast_message = self.messages[-1]\n\t\tlast_id = self.chat_ids[-1]\n\t\tif last_message in self.specials.special_cases:\n\t\t\treply = self.specials.generate_message(last_message)\n\t\telse:\n\t\t\tif self.planner.is_planning is True:\n\t\t\t\treply = self.planner.generate_message(last_message)\n\t\t\telse:\n\t\t\t\treply = self.understander.generate_message(last_message)\n\t\t\t\tif self.understander.new_planning is True:\n\t\t\t\t\treply = self.planner.generate_message(last_message, start=True)\n\t\t\t\t\tself.understander.new_planning = False\n\t\t\tif reply is None:\n\t\t\t\treply = self.did_not_get.generate_message()\n\n\t\t# unpack response\n\t\tresponse = reply[\"response\"]\n\t\tif \"buttons\" in reply:\n\t\t\tbuttons = reply[\"buttons\"]\n\t\tprint(reply)\n\t\tself.update_response(response, last_id, buttons)",
"def handle_create_response(self, response):\n\n if not self.model._meta['update_from_write'] or not response.content:\n return\n\n try:\n obj = self.obj_from_response(response)\n except ValueError:\n obj = None\n\n self.handle_response(response)\n\n return obj",
"def get_response(self, request):\n view = self.get_view()\n # Call its view with the request and this model.\n return view(request, flexible_page=self)",
"def createBaseResponseObject():\n\n out = dict()\n out['status'] = '1'\n out['results'] = []\n out['errors'] = []\n\n return out",
"def new_Response():\n req = flask.request.get_json()\n if not req:\n flask.abort(400, \"JSON improperly transmitted.\")\n # parse the response, give errors if needed\n try:\n title = req['title']\n content = handle_questions(req['content'], True)\n viewers = req['viewers']\n except KeyError:\n flask.abort(400, \"A required key was not included.\")\n # check owner key\n owner = auth(req)\n if not owner:\n flask.abort(400, \"Name or auth key incorrect.\")\n form_response = docs.Form(title=title,\n owner=owner,\n content=content,\n viewers=viewers)\n form_response.save()\n return flask.jsonify(form_response.to_json()), 201",
"def create_response(self, request, data, response_class=HttpResponse,\n **response_kwargs):\n\n desired_format = self.determine_format(request)\n options = request.GET.copy()\n options['username'] = request.user.username\n serialized = self.serialize(request, data, desired_format,\n options=options)\n return response_class(content=serialized,\n content_type=build_content_type(desired_format),\n **response_kwargs)",
"def create_response_element(self, **kwargs):\r\n return None",
"def create_model(self, data):\n model = Exchange()\n model.set_id(data[0])\n model.set_name(data[1])\n model.set_public(data[2])\n model.set_private(data[3])\n model.set_user_id(data[4])\n model.set_uid(data[5])\n model.set_pw(data[6])\n return model",
"def _create_response(self):\n self._buffer = parsers.ParserBuffer()",
"def response_cls(cls):\n return Response",
"def __init__(self):\n super(Response, self).__init__()",
"def __init__(self, resp_obj):\n self.resp_obj = resp_obj",
"def __init__(self, **kwargs):\n\n ## Error message\n self.error = ''\n ## Error code\n self.result = 0\n ## Apply passed keyword arguments to the Request object.\n super(ObjectDetectionLoadModels.Response, self).__init__(**kwargs)",
"def make_response(request, result):\n response = request.response\n response.text = json.dumps(result)\n return response",
"def sample_response(user, description='Sample description'):\n return Response.objects.create(user=user, description=description)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
takes in a string of columns and places alternating checkers in those columns, starting with 'X' For example, call b.setBoard('012345') to see 'X's and 'O's alternate on the bottom row, or b.setBoard('000000') to see them alternate in the left column. moveString must be a string of integers | def setBoard( self, moveString ):
nextCh = 'X' # start by playing 'X'
for colString in moveString:
col = int(colString)
if 0 <= col <= self.__width:
self.addMove(col, nextCh)
if nextCh == 'X': nextCh = 'O'
else: nextCh = 'X' | [
"def set_board(self, move_string):\r\n next_checker = 'X' # we starten door een 'X' te spelen\r\n for col_char in move_string:\r\n col = int(col_char)\r\n if 0 <= col <= self.width:\r\n self.add_move(col, next_checker)\r\n if next_checker == 'X':\r\n next_checker = 'O'\r\n else:\r\n next_checker = 'X'",
"def set_board(self, move_string):\n next_checker = 'X' # we starten door een 'X' te spelen\n for col_char in move_string:\n col = int(col_char)\n\n if 0 <= col <= self.width:\n self.add_move(col, next_checker)\n\n if next_checker == 'X':\n next_checker = 'O'\n else:\n next_checker = 'X'",
"def setBoard( self, moveString ):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.width:\n self.addMove(col, nextCh)\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'",
"def set_board(self, move_string):\n next_side = \"X\"\n for col_string in move_string:\n col = int(col_string)\n if col >= 0 and col <= self.width:\n self.add_move(col, next_side)\n if next_side == \"X\":\n next_side = \"O\"\n else:\n next_side = \"X\"",
"def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n if DEBUG_UP:\n print self\n assert False, \"invalid direction: \" + direction",
"def board_from_string(boardstring):\n board = []\n for c in range(7):\n col = []\n for d in range(6):\n char = boardstring[c * 6 + d]\n col.append(int(char))\n board.append(col)\n return board",
"def make_move( char, row_index, col_index, game_board):\n \n select = get_position(row_index, col_index, get_board_size(game_board))\n #this gives the position when all of the parameters are written. It is a bit\n #lengthy to write several times so it is compiled into \"select\" \n #as a variable.\n new_board = game_board[:select] + char + game_board[select + 1:]\n #this puts in the char in between the position (select) and one space after\n #the position (select + 1)\n return new_board",
"def load_board(self, boardstring: str):\n\n boardstring = str(boardstring)\n\n for index, char in enumerate(boardstring):\n char = int(char)\n x = index % self.line_length\n y = math.floor(index / self.line_length)\n\n self.cells.append(Cell(self, x, y, char))\n\n print(\"Loaded board:\")",
"def make_board(board_string):\n\n letters = board_string.split()\n\n board = [\n letters[0:5],\n letters[5:10],\n letters[10:15],\n letters[15:20],\n letters[20:25],\n ]\n\n return board",
"def make_move(self, move: str) -> Tuple[bool, int]:\n\n a, b, c = move\n\n ax = self.validate_col(a)\n cx = self.validate_row(c)\n\n backup_board = self.board.copy()\n\n if b in letters:\n bx = self.validate_col(b)\n if abs(ax - bx) != 1:\n self.illegal_move(\"can only swap adjacent HYPERENTITIES\")\n self.swap_vertical(ax, bx, cx)\n else:\n bx = self.validate_row(b)\n if abs(cx - bx) != 1:\n self.illegal_move(\"can only swap adjacent HYPERENTITIES\")\n self.swap_horizontal(ax, bx, cx)\n\n accept, score = self.propagate_or_reject_swap()\n if not accept:\n # you lose a point every time you make a pointless swap\n self.board = backup_board\n return (False, -1)\n else:\n return True, score",
"def apply_move(b,player,move):\n move = move.strip().lower()\n if len(move)!=2:\n raise Exception(\"Valid move is two characters (e.g. A2 or B3)\")\n if move[0] not in COLS:\n move = move[::-1]\n if move[0] not in COLS:\n raise Exception(\"No column spec found\")\n j = COLS.index(move[0])\n i = int(move[1])-1\n if b[i][j] != \" \":\n raise Exception(\"Another move already filled that position\")\n b[i][j] = player",
"def addMove(self, column, checkerType):\n height = self.height\n width = self.width\n board = self.boardstate\n \n i = height - 1\n \n if column > width or column < 0:\n return False\n \n while board[i][int(column)] != '_' and i >= 0:\n i -= 1\n \n if i < 0:\n return False\n \n \n board[i][column] = checkerType",
"def make_move(self, board):",
"def test_go_for_win_column(self):\n state = [0,0,0,C,0,0,C,0,0]\n self.assertEquals(botMove(state),0)\n\n state = [C,0,0,0,0,0,C,0,0]\n self.assertEquals(botMove(state),3)\n\n state = [C,0,0,C,0,0,0,0,0]\n self.assertEquals(botMove(state),6)\n\n state = [0,0,0,0,C,0,0,C,0]\n self.assertEquals(botMove(state),1)\n\n state = [0,C,0,0,0,0,0,C,0]\n self.assertEquals(botMove(state),4)\n\n state = [0,C,0,0,C,0,0,0,0]\n self.assertEquals(botMove(state),7)\n\n state = [0,0,0,0,0,C,0,0,C]\n self.assertEquals(botMove(state),2)\n\n state = [0,0,C,0,0,0,0,0,C]\n self.assertEquals(botMove(state),5)\n\n state = [0,0,C,0,0,C,0,0,0]\n self.assertEquals(botMove(state),8)",
"def make_move(symbol, row, col, game_board):\n \n # TODO: Your code here\n # Hint: Use get_position function to get the postion\n # Hint: Can we do s[position] = symbol ? This is a string\n # We will have to construct the string by ourselves\n # Use string slices and + to concatenate\n # str[start:finish] + new_symbol + str[finish:end]\n pass",
"def board_from_string(s, to_move=\"White\",\r\n may_castle=(False, False), en_passant=None):\r\n d = {}\r\n i = 7\r\n for line in s.split(\"\\n\"):\r\n if len(line) < 8:\r\n continue\r\n for j, c in enumerate(line):\r\n if c not in \". \":\r\n d[i, j] = c\r\n i -= 1\r\n d[\"to_move\"] = to_move\r\n d[\"may_castle\"] = may_castle\r\n d[\"en_passant\"] = en_passant\r\n return d",
"def format_point(move):\n column_letters = \"ABCDEFGHJKLMNOPQRSTUVWXYZ\"\n #column_letters = \"abcdefghjklmnopqrstuvwxyz\"\n if move == PASS:\n return \"pass\"\n row, col = move\n if not 0 <= row < MAXSIZE or not 0 <= col < MAXSIZE:\n raise ValueError\n return column_letters[col - 1]+ str(row)",
"def test_board_coordinates_toXY():\r\n m = Move()\r\n for col_num, col_name in enumerate(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']):\r\n for row in range(1, 9):\r\n assert m.translate_to_xy(col_name + str(row)) == (Board.SIZE - row, col_num)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if AutoML can be loaded from a folder | def _check_can_load(self):
if self.results_path is not None:
# Dir exists and can be loaded
if os.path.exists(self.results_path) and os.path.exists(
os.path.join(self.results_path, "params.json")
):
self.load(self.results_path)
self._results_path = self.results_path | [
"def can_load_directory(cls, directory):\n return directory.file(\"__plugin__.py\").exists()",
"def can_load(cls, filename):\n return False",
"def load_files(self):\n\n # Load YAML files\n try:\n self.kprop = safe_load(open(\"resources/default.yml\", 'r'))\n self.lprop = safe_load(open(\"resources/\" + self.layout + \".yml\", 'r'))\n except IOError as fexc:\n print(fexc)\n return False\n except YAMLError as yexc:\n print(\"[Exception] Error in YAML files\", yexc)\n return False\n\n # Load DuckyScript\n try:\n h_file = open(self.script, 'r')\n self.data = h_file.read()\n h_file.close()\n except IOError as fexc:\n print(fexc)\n return False\n\n return True",
"def in_folder(self):\n return len(os.path.split(self.file_path)) > 1",
"def is_valid_animation(path, verbose=True):\n try:\n if \"idle\" in os.listdir(path) or \"transition\" in os.listdir(path):\n return True\n else:\n if verbose:\n print(path, \"is not a valid animation folder! It needs an /idle or /transition folder!\")\n return False\n except:\n return False",
"def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True",
"def is_file_loaded(self, path=\"\"):\n for f in self.FILES_TO_CHECK:\n if f.path == path:\n return True\n return False",
"def is_loaded(filename):\n global _loaded_files\n data_name = create_dataname(filename)\n if data_name in _loaded_files:\n return True\n else:\n return False",
"def __isValidXMLResourcesFolder(self, folder):\n tablesInFolder = filter(lambda f: os.path.isdir(os.path.join(folder, f)),\n os.listdir(folder))\n containedInRequiredTables = map(lambda f: f in self.__requiredTables,tablesInFolder)\n return (True if len(containedInRequiredTables)>0 else False)",
"def is_folder(self):",
"def is_component_runnable_example(component):\n if os.path.exists(get_srcdir(component) + os.sep + \"reana.yaml\"):\n return True\n return False",
"def isMayaFile(potentialMayaFile):\n\n pass",
"def in_machine_folder(self):\n return os.path.exists(os.path.join(self.current_path, \"config\"))",
"def validate(self):\n for filename in [self.rmd_file, self.blogdown_file]:\n if not path.exists(path.join(self.source_folder, filename)):\n click.secho('Missing file: {}'.format(filename), fg='red')\n sys.exit(1)",
"def check_if_anim_exist(name, ext=vext, figpath=figpath):\n return not(os.path.isfile(format_filename(name, ext, figpath)))",
"def isFromReferencedFile(self):\n \n pass",
"def __check_in_autonotes_dir():\n if not os.path.isfile('master.tex'):\n cli.log.error(f'I can\\'t find a {emph(\"master.tex\")} file, '\n 'are you inside an autonotes directory?')\n exit(3)",
"def is_entry_folder(path):\n glob_pattern = os.path.join(path, '*.md')\n main_candidates = glob.glob(glob_pattern)\n if(main_candidates):\n for candidate in main_candidates:\n return EntryFolder.detect_file_content(candidate)\n else:\n return False\n else:\n False",
"def isLoaded(self,fileName):\n return mwIniFile.isLoaded(fileName)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Append error message to errors.md file. | def _update_errors_report(self, model_name, error_msg):
errors_filename = os.path.join(self._get_results_path(), "errors.md")
with open(errors_filename, "a") as fout:
self.verbose_print(f"There was an error during {model_name} training.")
self.verbose_print(f"Please check {errors_filename} for details.")
fout.write(f"## Error for {model_name}\n\n")
fout.write(error_msg)
link = "https://github.com/mljar/mljar-supervised/issues/new"
fout.write(
f"\n\nPlease set a GitHub issue with above error message at: {link}"
)
fout.write("\n\n") | [
"def add_error(self, reference_id, error):\n\n with open('runReport.txt', 'a') as report:\n try:\n report.write(\"\\nError: \" + self.domain + \" \" + reference_id + \": \" + error)\n except Exception:\n report.write(\"\\nError: \" + self.domain + \" \" + reference_id)",
"def add_error(self, message):\n self.errors.append(message)",
"def save_error(self, text):\n f = open('error.html', 'w')\n f.write(text)\n f.close()",
"def add_error(self, content):\n self._add_content(html_error(content))",
"def write_errors(error_message):\n text_file = open(\"Error_file.txt\", \"a\")\n text_file.writelines(error_message)\n text_file.writelines(\"\\n\")\n text_file.close()",
"def add_error(self, message) -> None:\n self.count_errors += 1\n if self.max_errors > 0 and self.count_errors <= self.max_errors:\n self.message = self.message + f\" (e.g. {message})\"",
"def add_error(self, path, error):\n self.errors = merge_errors(self.errors, self._make_error(path, error))",
"def add_error(e):\r\n saved_errors.append(e)\r\n log('%-70s\\n' % e)",
"def error(self, line, message):\n sys.stderr.write(str(self._filename) + \":\" + str(line) + \": \" + str(message) + \"\\n\")\n self._errors += 1",
"def log_errors(self):\n with open(self.error_path, 'w') as f:\n for error in self.errors:\n f.write(error+'\\n')",
"def add_error(self, error):\n self.errors.append(error)",
"def errln(self, text):\n self.err(text + \"\\n\")",
"def error(self, msg, *args, **kwargs):\n self.add_report_entry(ERROR, msg, args, **kwargs)",
"def errMessage(self, msg):\n\t\ttry:\n\t\t\tself.errfd.write(\"DASReader: %s\\n\" % msg)\n\t\texcept Exception, e:\n\t\t\tsys.stderr.write(\"Error writing to file '%s': %s\\n\" % (self.errfd.name, str(e)))",
"def add_msg_error(self, msg):\n if msg not in self.msg_error:\n self.msg_error.append(msg)",
"def doc_errors(errors):\n return \" :raises: {0}\".format(', '.join(errors))",
"def add_errdir(self):\n os.rename(self.rundir[-1], self.rundir[-1] + \"_err.\" + str(len(self.errdir)))\n self.update_errdir()",
"def initialize_error_summary() -> str:\n error_summary = '\\nSummary of <span class=\"tex-fatal\">Critical Errors:</span>\\n\\n<ul>\\n'\n return error_summary",
"def error(self, msg):\n if self.current_line and self.current_file:\n msg = '{}\\nError in {} line {}'.format(\n msg, self.current_file, self.current_line)\n return self.DirectiveError(msg)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the current model_time_limit | def _get_model_time_limit(self):
self._validate_model_time_limit()
return deepcopy(self.model_time_limit) | [
"def time_limit(self) -> float:\n return self._time_limit",
"def time_limit(self) -> int:\n return self._time_limit",
"def time_limit(self):\n all_time_limit_updates = self.updates.exclude(\n time_limit_delta=timedelta())\n return self.time_limit_as_of_update(\n all_time_limit_updates.latest('id'))",
"def get_limit(self):\n return self.limit",
"def get_rate_limit():\n return getattr(g, \"_rate_limit\", None)",
"def after_contact_work_time_limit(self) -> int:\n return pulumi.get(self, \"after_contact_work_time_limit\")",
"def get_timeout(self):\n\n return self.timeout",
"def refund_time_limit(self):\n return self._refund_time_limit",
"def _get_max_suppress_time(self):\n return self.__max_suppress_time",
"def max_timeout(self):\n return self._max_timeout",
"def max_time(self) -> str:\n return self._max_time",
"def limit_period(self):\n return self._limit_period",
"def get_rate_limit(self):\n resp = self._session.get(self.API_ROOT + \"/rate_limit\")\n log.info(resp.text)",
"def MaxWaitTime(self):\r\n\t\treturn self._get_attribute('maxWaitTime')",
"def get_soft_limit():",
"def rate_limiting_resettime(self):\n if self.__requester.rate_limiting_resettime == 0:\n self.get_rate_limit()\n return self.__requester.rate_limiting_resettime",
"def storage_limit(self):\n return self._limit",
"def getTimeout():",
"def _get_time_interval_in_minutes(self):\n return self.visa.get_request_interval_in_minutes()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the current algorithms. If "auto" it is determined | def _get_algorithms(self):
self._validate_algorithms()
if self.algorithms == "auto":
if self._get_mode() == "Explain":
return [
"Baseline",
"Linear",
"Decision Tree",
"Random Forest",
"Xgboost",
"Neural Network",
]
if self._get_mode() == "Perform":
return [
"Linear",
"Random Forest",
"LightGBM",
"Xgboost",
"CatBoost",
"Neural Network",
]
if self._get_mode() == "Compete":
return [
"Decision Tree",
"Linear",
"Random Forest",
"Extra Trees",
"LightGBM",
"Xgboost",
"CatBoost",
"Neural Network",
"Nearest Neighbors",
]
if self._get_mode() == "Optuna":
return [
"Random Forest",
"Extra Trees",
"LightGBM",
"Xgboost",
"CatBoost",
"Neural Network",
]
else:
return deepcopy(self.algorithms) | [
"def algorithms():\n algorith_paradigms = ['Divide-and-conquer', 'Backtrackig', 'Greedy-Algorithms', 'Dynamic-programming']\n return algorith_paradigms",
"def algorithms(self):\n return [algorithm for algorithm in self.algorithm_handles]",
"def __get_algorithms():\n return hashlib.algorithms_available",
"def algorithms(self):\n if self._algorithms is None:\n uri = \"/loadbalancers/algorithms\"\n resp, body = self.method_get(uri)\n self._algorithms = [alg[\"name\"] for alg in body[\"algorithms\"]]\n return self._algorithms",
"def valid_algorithms():\n return ['UVIS', 'IR']",
"def _available_algorithms(**_: str) -> Set[str]:\n avail = set()\n pass2 = set()\n for algo in hashlib.algorithms_available:\n lalgo = algo.lower()\n if \"with\" in lalgo:\n continue # skip apparently redundant ones\n if lalgo != algo:\n pass2.add(algo)\n else:\n avail.add(lalgo)\n for algo in pass2:\n if algo.lower() not in avail:\n avail.add(algo)\n return avail",
"def __top_Algs_ ( self ) :\n _algs = self.TopAlg\n\n def _alg_name_ ( _n ):\n\n _p = _n.rfind('/')\n if 0 > _p : return _n\n return _n[_p:]\n\n def _pyAlg ( _n ) :\n for _a in self.pyalgorithms :\n if _n == _a.name() : return _a\n return None \n \n algs = [] \n for _a in _algs :\n # get the proper name \n _n = _alg_name_ ( _a )\n # check if it is pyalgorithm:\n _pa = _pyAlg ( _n )\n if _pa :\n algs += [ _pa ]\n else :\n _alg = self.algorithm ( _a , True )\n algs += [ _alg ]\n \n return algs",
"def __all_Algs_ ( self ) :\n _algs = self.algorithms()\n\n algs = []\n for _a in _algs :\n algs += [ self.algorithm ( _a ) ]\n return algs",
"def _default_options(self, module, algorithm):\r\n module = __import__(module, globals(), locals(), [algorithm], -1)\r\n algorithm = getattr(module, algorithm)\r\n \r\n return algorithm.get_default_options()",
"def get_session_algorithms(self): # real signature unknown; restored from __doc__\n return \"\"",
"def _loadAlgorithms(self):\n self.algs = self.alglist",
"def get_common_algorithm(external, prefered=None):\n if prefered is not None:\n if prefered in external:\n return prefered\n for alg in ALGORITHMS:\n if alg in external:\n return alg\n raise ValueError(\"No common algorithm found\")",
"def algorithm(self) -> Optional[pulumi.Input['GoogleCloudMlV1__HyperparameterSpecAlgorithm']]:\n return pulumi.get(self, \"algorithm\")",
"def test_get_algorithm_list(self):\n extloader = ExtensionLoader()\n pipeline = Pipeline(extloader.cats_container)\n standard = ['Bilateral Filter', 'Blur', 'Color enhancement', 'Fast nl Means Denoising', 'Fast nl Means Denoising Colored', 'Gaussian Blur', 'Invert Color', 'Median Blur']\n self.assertEqual(pipeline.get_algorithm_list(0),standard)",
"def getAlgoForOS():\n\ttheAlgo = None\n\tif sys.platform.startswith(\"linux\"):\n\t\ttheAlgo = str(\"-aes-256-ctr\")\n\telse:\n\t\ttheAlgo = str(\"-blowfish\")\n\treturn theAlgo",
"def _get_algorithm(name: str) -> Any:\n algo_cls = getattr(hashes, name.upper(), None) # hack: get class object by name\n if algo_cls is None:\n raise ValueError(f'Unsupported algorithm: hashes.{name}'.format(name=name.upper()))\n\n return algo_cls() # pylint: disable=not-callable",
"def _get_strategies(self) -> Dict[str, str]:\n strategies = [method for method in dir(self) if STRATEGY_IDENTIFIER in method]\n\n if not strategies:\n logger.warning(\n \"There are no strategy provided. \"\n \"Make sure the implemented strategy methods \"\n \"start contain the '%s' term.\" % STRATEGY_IDENTIFIER\n )\n return {str(n_method): method for n_method, method in enumerate(strategies)}",
"def get_algorithms(self) -> Dict[Type, Type[bcore._BackendAgent]]:\n return {easyagents.agents.DqnAgent: DebugAgent,\n easyagents.agents.PpoAgent: DebugAgent,\n easyagents.agents.RandomAgent: DebugAgent,\n easyagents.agents.ReinforceAgent: DebugAgent}",
"def _find_algorithm_instance(self, algorithm_name):\n algorithms_dict = self._find_list_of_algorithms_preprocessor(\"algorithms\", [])\n if algorithms_dict.get(algorithm_name, None):\n return algorithms_dict[algorithm_name]\n\n algorithms_found = \"\\n \".join(algorithms_dict)\n raise ValueError(\"Unable to find your algorithm, here is a list\"\n f\"of the algorithms we could find: \\n {algorithms_found}\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the current train_ensemble | def _get_train_ensemble(self):
self._validate_train_ensemble()
return deepcopy(self.train_ensemble) | [
"def ensemble(self):\n return self._ensemble",
"def train(self):\n return self._train",
"def train_environment(self):\n return self._train_environment",
"def getTrainSet(self):\r\n return self.fTrainData",
"def training_set(self):\n return self._training_set",
"def getTrainInstance(self): #NOTE: Probably faster way of doing this than additional 'if' statement every learning iteration\r\n return [self.currentTrainState, self.currentTrainPhenotype] #Return unadulterated training data\r",
"def get_train(self):\n return self.id_train, self.x_train, self.y_train",
"def estimator(self):\n if self._estimator is None:\n self.load_estimator()\n return self._estimator",
"def get_train_dataset(self):\n return self.train_dataset",
"def getEstimator(self) -> Estimator:\n return self.getOrDefault(self.estimator)",
"def getClassifier(self):\n if self._classifierModel is None:\n print('The run method must be launched first.')\n else:\n return self._classifierModel",
"def trainers ( self ) :\n return self.__trainers",
"def get_current_train_step(self):\n return self.optimizer.iterations.numpy()",
"def ensemble_name(self, index, namemode):\n ensemble = self.interface.get_ensemble_name(index, namemode)\n self.logger.debug(\"Returning ensemble name for channel index=\" + str(index) + \": \" + str(ensemble))\n return ensemble",
"def get_classifier(self):\n return self.rf",
"def get_train_examples(self):\n raise NotImplementedError()",
"def _ensemble():\n return {\n 'type' : 'class',\n 'name' : 'ensemble',\n 'base' : 'activity.numerical_activity',\n 'is_abstract' : False,\n 'is_entity' : True,\n 'doc' : 'An ensemble is made up of two or more simulations which are to be compared against each other to create ensemble statistics. Ensemble members can differ in terms of initial conditions, physical parameterisation and the model used. An ensemble bundles together sets of ensembleMembers, all of which reference the same Simulation(Run) and include one or more changes.',\n 'properties' : [\n ('meta', 'shared.doc_meta_info', '1.1', None),\n ('members', 'activity.ensemble_member', '1.N', None),\n ('types', 'activity.ensemble_type', '1.N', None),\n ('outputs', 'shared.data_source', '0.N', 'Points to the DataSource used to conform to a particular Requirement. This may be part of an activity::simulation or a software::component. It can be either a DataObject or a SoftwareComponent or a ComponentProperty. It could also be by using particular attributes of, say, a SoftwareComponent, but in that case the recommended practise is to reference the component and add appropriate text in the conformance description attribute.'),\n ('outputs_references', 'shared.doc_reference', '0.N', None),\n ],\n 'decodings' : [\n ('meta', 'self::cim:ensemble'),\n ('members', 'child::cim:ensembleMember'),\n ('types', 'child::cim:ensembleType/@value'),\n ('outputs', 'child::cim:output/cim:output/cim:dataObject', 'data.data_object'),\n ('outputs', 'child::cim:output/cim:output/cim:dataContent', 'data.data_content'),\n ('outputs', 'child::cim:output/cim:output/cim:componentProperty', 'software.component_property'),\n ('outputs', 'child::cim:output/cim:output/cim:softwareComponent', 'software.model_component'),\n ('outputs', 'child::cim:output/cim:output/cim:softwareComponent', 'software.processor_component'),\n ('outputs', 'child::cim:output/cim:output/cim:softwareComponent', 'software.statistical_model_component'),\n ('outputs_references', 'child::cim:output/cim:reference'),\n ]\n }",
"def _get_best_ensemble_index(self, current_iteration):\n\n # Skip the evaluation phase when there is only one candidate subnetwork.\n if len(current_iteration.candidates) == 1:\n tf.logging.info(\n \"As the only candidate, '%s' is moving onto the next iteration.\",\n current_iteration.candidates[0].ensemble_spec.name)\n return 0\n\n # The zero-th index candidate at iteration t>0 is always the\n # previous_ensemble.\n if current_iteration.number > 0 and self._force_grow and (len(\n current_iteration.candidates) == 2):\n tf.logging.info(\n \"As the only candidate with `force_grow` enabled, '%s' is moving\"\n \"onto the next iteration.\",\n current_iteration.candidates[1].ensemble_spec.name)\n return 1\n\n latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)\n tf.logging.info(\"Starting ensemble evaluation for iteration %s\",\n current_iteration.number)\n with tf.Session() as sess:\n init = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer(), tf.tables_initializer())\n sess.run(init)\n saver = tf.train.Saver(sharded=True)\n saver.restore(sess, latest_checkpoint)\n coord = tf.train.Coordinator()\n tf.train.start_queue_runners(sess=sess, coord=coord)\n if self._evaluator:\n adanet_losses = [\n c.ensemble_spec.adanet_loss for c in current_iteration.candidates\n ]\n adanet_losses = self._evaluator.evaluate_adanet_losses(\n sess, adanet_losses)\n else:\n adanet_losses = sess.run(\n [c.adanet_loss for c in current_iteration.candidates])\n values = []\n for i in range(len(current_iteration.candidates)):\n metric_name = \"adanet_loss\"\n ensemble_name = current_iteration.candidates[i].ensemble_spec.name\n values.append(\"{}/{} = {:.6f}\".format(metric_name, ensemble_name,\n adanet_losses[i]))\n tf.logging.info(\"Computed ensemble metrics: %s\", \", \".join(values))\n if self._force_grow and current_iteration.number > 0:\n tf.logging.info(\n \"The `force_grow` override is enabled, so the \"\n \"the performance of the previous ensemble will be ignored.\")\n # NOTE: The zero-th index candidate at iteration t>0 is always the\n # previous_ensemble.\n adanet_losses = adanet_losses[1:]\n index = np.argmin(adanet_losses) + 1\n else:\n index = np.argmin(adanet_losses)\n tf.logging.info(\"Finished ensemble evaluation for iteration %s\",\n current_iteration.number)\n tf.logging.info(\"'%s' at index %s is moving onto the next iteration\",\n current_iteration.candidates[index].ensemble_spec.name,\n index)\n return index",
"def set_ensemble(self,ensemble):\n self.ensemble=ensemble"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the current stack_models | def _get_stack_models(self):
self._validate_stack_models()
if self.stack_models == "auto":
val = self._get_validation_strategy()
if val.get("validation_type", "") == "custom":
return False
return True if self.mode in ["Compete", "Optuna"] else False
else:
return deepcopy(self.stack_models) | [
"def get_models(self):\n return self.ensemble.get_models()",
"def get_models(self):\n self.load()\n return self._models",
"def models(self):\n return self.config.models()",
"def get_models(self):\n\n base = self.get_base()\n return getattr(base, self.resource).json[\"api_declaration\"][\"models\"]",
"def models(self):\n return self._base.classes",
"def get_model_name_list(self):\n return self.model_name_list",
"def _get_models():\n from . import models\n return models",
"def getModel(self):\n return _libsbml.SBase_getModel(self)",
"def get_stack(self):\n return self.stack",
"def stacked_model(models):\n\n stack_m = [] \n for model, m in models.items(): \n stack_m.append((model, m))\n stack_model = StackingClassifier(estimators = stack_m, final_estimator = LogisticRegression(), cv = 3)\n models['stacked'] = stack_model\n \n return models",
"def get_stack(self):\n return self.__stack",
"def model(self):\n return self.workspace.model",
"def list_models(self):\n request = self.client.projects().models()\\\n .list(parent=self.parent)\\\n .execute()\n return request",
"def _getModel(self):\r\n \r\n return self._model",
"def get_models():\n return list(TFGraphBuilderFactory.__model_builders.keys())",
"def model_names(self):\n return self._models.keys()",
"def get_model(self):\n return self.model",
"def getTopModel(self):\n top = self.model\n while top.parent is not None:\n top = top.parent\n return top",
"def stack(self):\n if self._stack: return self._stack\n\n self._stack = self.execute\n stack_items = (copy(self.processors) +\n [(ModelBridge, {})] +\n copy(self.middlewares))\n stack_items.reverse()\n\n for item in stack_items:\n if not isinstance(item, (list, tuple)):\n item = tuple([item, {}])\n\n klass = item[0]\n config = item[1]\n\n self._stack = klass(self._stack, config)\n\n return self._stack"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the current validation_strategy | def _get_validation_strategy(self):
strat = {}
self._validate_validation_strategy()
if self.validation_strategy == "auto":
if self._get_mode() == "Explain":
strat = {
"validation_type": "split",
"train_ratio": 0.75,
"shuffle": True,
"stratify": True,
}
elif self._get_mode() == "Perform":
strat = {
"validation_type": "kfold",
"k_folds": 5,
"shuffle": True,
"stratify": True,
}
elif self._get_mode() in ["Compete", "Optuna"]:
strat = {
"validation_type": "kfold",
"k_folds": 10,
"shuffle": True,
"stratify": True,
}
if self._get_ml_task() == REGRESSION:
if "stratify" in strat:
# it's better to always check
# before delete (trust me)
del strat["stratify"]
return strat
else:
strat = deepcopy(self.validation_strategy)
if self._get_ml_task() == REGRESSION:
if "stratify" in strat:
del strat["stratify"]
return strat | [
"def getValidation(self):\n return self.validation_config",
"def get_validation_rule(self):\n return self.validation_rule",
"def get_strategy(self):\n return self.strategy",
"def validator(self):\n return self._validator",
"def paramValidationPref(self):\n # If the level of the object is below the Preference level,\n # recursively call base (super) classes to get preference at specified level\n return self.get_pref_setting_for_level(PARAM_VALIDATION_PREF, self._param_validation_pref.level)[0]",
"def get_validator(self):\n return self.get_validator_class()(**self.get_validator_kwargs())",
"def get_validator_class(self):\n return self.validator_class",
"def get_validation_schema(self):\n return self.validation_schema",
"def session_validator(self):\n return self.session.validator",
"def validation_frame(self):\n return self._parms.get(\"validation_frame\")",
"def validation_rules(self):\n return self._validation_rules",
"def validation_id(self):\n return self._validation_id",
"def active_validation(self) -> bool:\n return self._active_validation",
"def get_validator_class(self):\n validator_class = self.oauth_validator_class\n if validator_class is not None:\n return validator_class\n return oauth_api_settings.DEFAULT_VALIDATOR_CLASS",
"def get_validator(cls):\n cls.validator.model = cls\n return cls.validator or SageValidator",
"def validation_required(self):\n return self._validation_required",
"def get_parameter_validator(cls) -> EnsureCommandParameterization | None:\n return cls._validator_",
"def validation_type(self) -> Optional[str]:\n return pulumi.get(self, \"validation_type\")",
"def inductive_validation(self) -> Optional[TriplesFactory]: # type:ignore # noqa: D401\n if not self._loaded:\n self._load()\n assert self._inductive_validation is not None\n return self._inductive_validation"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |