Dataset fields:
- query: string (lengths 9 to 9.05k)
- document: string (lengths 10 to 222k)
- negatives: sequence (19 to 20 items)
- metadata: dict

Each record below gives these fields in order: query, document, negatives, metadata.
Write the concordance entries to the output file (filename). See sample output files for the format.
def write_concordance(self, filename):
    all_keys = self.concordance_table.get_all_keys()
    lines = []
    for i in all_keys:
        a = ""
        a += i + ":"
        f = self.concordance_table.get_value(i)
        if f != None:
            for s in f:
                a += " " + str(s)
        a += "\n"
        lines.append(a)
    a = open(filename, "w+")
    for i in lines:
        a.write(i)
    a.close()
[ "def write_concordance(self, filename):\r\n key_list = self.concordance_table.get_all_keys()\r\n key_list.sort()\r\n write_text = ''\r\n for x in range(0,len(key_list)):\r\n values = self.concordance_table.get_value(key_list[x])\r\n values_str = ''\r\n for y in range(0, len(values)):\r\n values_str += str(values[y]) + ' '\r\n write_text += key_list[x] + ': ' + values_str[:len(values_str) - 1] + '\\n'\r\n write_text = write_text[:len(write_text) - 1]\r\n write_file = open(filename, 'w')\r\n write_file.write(write_text)\r\n write_file.close()", "def write_concordance(self, filename):\n out = ''\n values = [x for x in self.concordance_table.hash_table if x is not None]\n values.sort(key=lambda x: x[0])\n for v in values:\n out += f'{v[0]}: {\" \".join(str(x) for x in sorted(set(v[1])))}\\n' \n with open(filename, 'w') as f:\n f.write(out.rstrip())", "def write_concordance(self, filename):\n file = open(filename, \"w\")\n keys = self.concordance_table.get_all_keys()\n keys.sort()\n for i in range(len(keys)):\n index = self.concordance_table.get_index(keys[i])\n key = self.concordance_table.hash_table[index][0]\n values = self.concordance_table.hash_table[index][1]\n string_of_values = \"\"\n for j in values:\n string_of_values += \" \" + str(j)\n\n content = \"{0}:{1}\".format(key, string_of_values)\n\n if i == len(keys) - 1:\n file.write(content)\n else:\n file.write(content + \"\\n\")\n\n file.close()", "def write_cando_file(self, file_name):\n cando_writer = CandoWriter(self.dna_structure)\n cando_writer.write(file_name)", "def conllWrite(outputPath, sentences, headers):\n if not os.path.exists(os.path.dirname(outputPath)):\n os.makedirs(os.path.dirname(outputPath))\n fOut = open(outputPath, 'w')\n\n for sentence in sentences:\n fOut.write(\"#\")\n fOut.write(\"\\t\".join(headers))\n fOut.write(\"\\n\")\n for tokenIdx in range(len(sentence[headers[0]])):\n aceData = [sentence[key][tokenIdx] for key in headers]\n fOut.write(\"\\t\".join(aceData))\n fOut.write(\"\\n\")\n fOut.write(\"\\n\")", "def _write_conductances(self, cond_file_name):\n cond_file_path = os.path.join(OM_STORAGE_DIR, cond_file_name)\n\n #TODO: Check that the file doesn't already exist.\n LOG.info(\"Writing head conductance file: %s\" % cond_file_path)\n file_handle = file(cond_file_path, \"a\")\n\n file_handle.write(\"# Properties Description 1.0 (Conductivities)\\n\\n\")\n file_handle.write(\"Air %4.2f\\n\" % self.conductances[\"air\"])\n file_handle.write(\"Scalp %4.2f\\n\" % self.conductances[\"skin\"])\n file_handle.write(\"Brain %4.2f\\n\" % self.conductances[\"brain\"])\n file_handle.write(\"Skull %4.2f\\n\" % self.conductances[\"skull\"])\n\n file_handle.close()\n LOG.info(\"%s written successfully.\" % cond_file_path)\n\n return cond_file_path", "def write_conll_to_file(self, file_path):\n self.conll_file.write_conll(file_path)", "def write_CA_atoms():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n ca_list = []\n with open(filepath, 'r') as pdb:\n for line in pdb:\n if line[:4] == 'ATOM' and line[12:16] == \" CA \":\n line_split = line.split()[6:9]\n ca_list.append(line_split)\n choice1 = input('Enter name of the outfile: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in ca_list:\n outfile.writelines(i)\n print('Done!')\n print(i)", "def write_to_fasta(self, output_file):\n fw = FastaWriter(output_file)\n for file_path, 
template, complement in self.results:\n if template:\n header = \"{0} {1}\".format(file_path, \"template\")\n fw.write_entry(header, template)\n if complement:\n header = \"{0} {1}\".format(file_path, \"complement\")\n fw.write_entry(header, complement)", "def file_output(matches: list, output_file_name: str = 'matches.txt'):\n with open(\"test/Matches/\" + output_file_name, 'w') as f:\n for match in matches:\n for event in match.events:\n f.write(\"%s\\n\" % event.payload)\n f.write(\"\\n\")", "def write(self, filename):\n with open(filename, 'w') as f:\n for entry in self.entrys:\n f.write(self._entry_to_string(entry) + '\\n')\n logging.info('Wrote {0} entrys to file {1}'.format(\n len(self.entrys), filename))", "def write_antecedent_decisions_to_file(self, file):\n for document in self.documents:\n document.write_antecedent_decisions_to_file(file)", "def write_to_file(self):\n self._file_writer.write(self._reconstructed_sentences)", "def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()", "def write_corpus_to_file(output_file, corpus): \n \n file = open(output_file, 'w')\n for line in corpus: \n file.write(line)\n print ('Corpus has been writted in file')\n file.close()", "def write_file(self, filename):\n\n with open(filename, 'w', newline = '') as csvfile:\n langwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for key in self.features:\n value = self.features[key]\n l = []\n for val in value:\n l.append(str(val))\n langwriter.writerow([l])\n return", "def write_cn_cards(bc_file, bc_class):\n cn = bc_class.constituent_properties\n bc_file.write('! Constituent Properties\\n')\n if not cn.general_constituents.empty:\n # bc_file.write(cn.general_constituents.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.general_constituents.iterrows():\n bc_file.write(\n 'CN CON {} {}\\n'.format(row['ID'].astype('int'), row['CONC']))\n if not cn.sand.empty:\n # bc_file.write(cn.sand.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.sand.iterrows():\n bc_file.write(\n 'CN SND {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if not cn.clay.empty:\n # bc_file.write(cn.clay.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.clay.iterrows():\n bc_file.write(\n 'CN CLA {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if cn.salinity:\n bc_file.write('CN SAL {} {}\\n'.format(cn.salinity_id, cn.reference_concentration))\n if cn.temperature:\n bc_file.write('CN TMP {} {}\\n'.format(cn.temperature_id, cn.reference_temperature))\n if cn.vorticity:\n bc_file.write('CN VOR {} {} {} {}\\n'.format(cn.vorticity_id, cn.vorticity_normalization,\n cn.vorticity_as_term, cn.vorticity_ds_term))\n\n bc_file.write('\\n') # blank line at the end of the Constituent Properties", "def write_file(self):\n with open(self._file_name, 'w') as output_file:\n output_file.writelines([line + '\\n' for line in self._lines])", "def write_conformers(self, filename): # ccids):\n cnt = 0\n for confId in range(self.nconf): #ccids:\n w = Chem.SDWriter('%s_c%03d.sdf'%(filename,cnt+1))\n w.write(self.mol, confId=confId)\n w.flush()\n w.close()\n cnt += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds a k-factor circulant matrix (a matrix with the structure of circulant matrices, but with the entries above the diagonal multiplied by the same factor). The matrix is stored in memory.
def factor_circulant_matrix(x, k):
    n = len(x)
    return circulant(x) * (tri(n, n, 0) + k*np.transpose(tri(n, n, -1)))
[ "def generate_k_circulant(n: int, k: int):\n return nx.to_numpy_matrix(\n nx.generators.classic.circulant_graph(n, list(range(1, k + 1))),\n dtype=np.int64,\n )", "def _calc_k_matrix(self):\n el_len = self.coord_electrode.size\n h = float(np.diff(self.coord_electrode).min())\n\n c_jm1 = np.eye(el_len + 2, k=0) / h\n c_jm1[0, 0] = 0\n\n c_j0 = np.eye(el_len + 2) / h\n c_j0[-1, -1] = 0\n\n c_jall = c_j0\n c_jall[0, 0] = 1\n c_jall[-1, -1] = 1\n\n tjp1 = np.eye(el_len + 2, k=1)\n tjm1 = np.eye(el_len + 2, k=-1)\n\n tj0 = np.eye(el_len + 2)\n tj0[0, 0] = 0\n tj0[-1, -1] = 0\n\n # Defining K-matrix used to calculate e_mat1-3\n return np.dot(np.linalg.inv(np.dot(c_jm1, tjm1) +\n 2 * np.dot(c_jm1, tj0) +\n 2 * c_jall +\n np.dot(c_j0, tjp1)),\n 3 * (np.dot(np.dot(c_jm1, c_jm1), tj0) -\n np.dot(np.dot(c_jm1, c_jm1), tjm1) +\n np.dot(np.dot(c_j0, c_j0), tjp1) -\n np.dot(np.dot(c_j0, c_j0), tj0)))", "def form_factor_k_k(self, q):\n return self._ff_k_k.form_factor(q=q, couplings=self._couplings)", "def _K(m):\n M = m*(m - 1)/2\n K = np.zeros((M, m**2), dtype=np.int64)\n row = 0\n for j in range(1, m):\n col = (j - 1)*m + j\n s = m - j\n K[row:(row+s), col:(col+s)] = np.eye(s)\n row += s\n return K", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def ckm(i,j):\n if i >= 1 and i <= 3 and j >= 1 and j <= 3:\n return _ckm_abs[i-1, j-1]\n else:\n raise(ValueError('Wrong generation index in CKM matrix: ({},{}).'.format(i,j)))", "def factor_circulant_multiplication(u, x, k=1):\n n = len(u) \n D_k = (k**(1/n))**np.arange(0,n)\n Lambda = fft(D_k*x)\n return (1/D_k)*real(ifft(Lambda*fft(D_k*u))) # y", "def clement(n, k=0):\n z = np.arange(1, n)\n\n if k == 0:\n a = np.diag(z[::-1], -1) + np.diag(z, 1)\n else:\n y = np.sqrt(z[::-1] * z)\n a = np.diag(y, -1) + np.diag(y, 1)\n\n return a", "def _diagk(X, k):\n X = np.asanyarray(X)\n s = X.shape\n if len(s) > 1:\n D = np.diag(X, k)\n else:\n D = np.array([])\n\n return D", "def build_k_by_k_matrix(k1, k2, n_clusters):\r\n\r\n m = np.zeros((n_clusters, n_clusters))\r\n for class1, class2 in zip(k1, k2):\r\n m[class1][class2] += 1\r\n print(m)\r\n return m", "def create_matrix(lengths, covs, kmers):\n\n C = _create_and_normalize_covs(covs, lengths)\n K = _create_and_normalize_kmers(kmers)\n\n return np.hstack((K,C))", "def kronecker_graph(g, k, add_self_edges=True, strip_self_edges=True):\n\n adj = nx.adjacency_matrix(g).todense()\n if add_self_edges:\n for i in range(len(adj)):\n adj[i, i] = 1\n mat = adj\n for i in range(k - 1):\n mat = np.kron(mat, adj)\n if strip_self_edges:\n for i in range(len(mat)):\n mat[i, i] = 0\n name = \"kronecker(%s, %s, %s, %s)\" % (\n g.name if g.name else hash(g), k, add_self_edges, strip_self_edges)\n return nx.Graph(mat, name=name)", "def dimension_cusp_forms(self, k, eps=0):\n if eps == 0:\n eps = self._sgn\n return self._weil_module.dimension_cusp_forms(k, eps)", "def nCkarray(*k_values):\n result = 1\n for i, j in enumerate((m for k in k_values for m in range(1, k+1)), 1):\n result = (result * i) // j\n return result", "def cdf(self, k):\n\n if k < 0 or k > self.n:\n return 0\n\n k = int(k)\n ans = 0\n for i in range(0, k + 1):\n ans += self.pmf(i)\n return ans", "def matrices(self):\n # Creating L\n L = scipy.sparse.diags((self.inv_dx2, -2*self.inv_dx2, self.inv_dx2, 1),\n (-(self.N+1), -self.N, -(self.N-1), self.N),\n shape=(2*self.N, 2*self.N), 
dtype=np.complex128)\n self.L = scipy.sparse.csr_matrix(L)\n self.L[-(self.N+1), 0], self.L[-1, -self.N] = 0, 0\n\n # Computing largest eigenvalue of L explicitely:\n self.mu_max = self.inv_dx*np.sqrt(2*(1 + np.cos(np.pi/(self.N+1))))\n\n # Creating K\n self.K = scipy.sparse.diags((-self.inv_dx2, 2*self.inv_dx2, -self.inv_dx2),\n (-1, 0, 1), # Diagonals\n shape=(self.N, self.N), # Size of matrix\n dtype=np.complex128)", "def CartanMatrix(ct):\n\n return CartanType(ct).cartan_matrix()", "def kronecker(self, value):\n if not (type(self) == type(value)):\n raise TypeError(\"Inappropriate argument type for kronecker product\")\n returnvalue = Matrix()\n for i in range(self._height):\n for j in range(value._height):\n newRow = list()\n for k in range(self._width):\n for l in range(value._width):\n newRow.append(self[i][k] * value[j][l])\n returnvalue.addRow(*newRow)\n return returnvalue", "def __factor_matrix(self, R, K, alpha, steps, beta, error_limit):\n # Transform regular array to numpy array\n R = numpy.array(R)\n\n # Generate P - N x K\n # Use random values to start. Best performance\n N = len(R)\n M = len(R[0])\n P = numpy.random.rand(N, K)\n\n # Generate Q - M x K\n # Use random values to start. Best performance\n Q = numpy.random.rand(M, K)\n Q = Q.T\n\n error = 0\n\n # iterate through max # of steps\n for step in xrange(steps):\n\n # iterate each cell in r\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # get the eij (error) side of the equation\n eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])\n\n for k in xrange(K):\n # (*update_rule) update pik_hat\n P[i][k] = P[i][k] + alpha * (2 * eij * Q[k][j] - beta * P[i][k])\n\n # (*update_rule) update qkj_hat\n Q[k][j] = Q[k][j] + alpha * ( 2 * eij * P[i][k] - beta * Q[k][j] )\n\n # Measure error\n error = self.__error(R, P, Q, K, beta)\n\n # Terminate when we converge\n if error < error_limit:\n break\n\n # track Q, P (learned params)\n # Q = Products x feature strength\n # P = Users x feature strength\n self.Q = Q.T\n self.P = P\n\n self.__print_fit_stats(error, N, M)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the matrix-vector product y = Cu, where C is a k-factor circulant matrix. All matrices are real.
def factor_circulant_multiplication(u, x, k=1):
    n = len(u)
    D_k = (k**(1/n))**np.arange(0, n)
    Lambda = fft(D_k*x)
    return (1/D_k)*real(ifft(Lambda*fft(D_k*u)))  # y
[ "def scalarMultiplication(self,c):\n matrixResult = [[complex.ComplexNumber(0,0) for x in range(self.m)] for y in range(self.n)] \n for i in range (self.m):\n for j in range (self.n):\n matrixResult[i][j]=self.mtx[i][j].multiplication(c)\n matResult = Matrix(matrixResult)\n return matResult", "def updateC(A, U, B):\n \n m_dim = A.shape[1] \n q_dim = B.shape[0]\n \n C_tensor = np.zeros((m_dim, m_dim, q_dim), dtype=np.complex)\n \n for k in range(q_dim):\n A_k = A[:, :, k]\n b_k = B[k]\n \n x_hat = U @ b_k\n y_hat = A_k.conj().T @ x_hat\n \n phase_y = np.exp(1j*np.angle(y_hat))\n #phase_y = np.sign(y_hat)\n C_k = np.diag(phase_y)\n C_tensor[:, :, k] = C_k\n \n \n return C_tensor", "def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product", "def factor_circulant_matrix(x, k):\n n=len(x)\n return circulant(x) * (tri(n,n, 0) + k*np.transpose(tri(n,n, -1)))", "def covar(fx,cx):\n \n fx = np.array(fx)\n cx = np.array(cx)\n \n shape_fx = fx.shape\n shape_cx = cx.shape\n \n \n if shape_fx[1] != shape_cx[0]:\n print('-----------------------------------------')\n print(\"Shapes of fx and cx cannot be multiplied:\")\n print(shape_fx,\"x\",shape_cx)\n print('-----------------------------------------')\n raise ValueError('Input matrices are not compliant')\n \n cy = np.dot(np.dot(fx,cx),fx.T)\n \n print(\"Size of Cy matrix: \",np.shape(cy))\n \n return cy", "def matmult(*x):\n return reduce(np.dot, x)", "def factors_multiple(self, U=None):\n factors = U.dot(self.C_)\n if self.C_bias_.shape[0]:\n factors[:] += self.C_bias_.reshape((1,-1))\n return factors", "def calc_CCuij(U, V):\n ## FIXME: Check for non-positive Uij's, 2009-08-19\n invU = linalg.inverse(U)\n invV = linalg.inverse(V)\n #invU = internal_inv3x3(U)\n #invV = internal_inv3x3(V)\n \n det_invU = linalg.determinant(invU)\n det_invV = linalg.determinant(invV)\n\n return ( math.sqrt(math.sqrt(det_invU * det_invV)) /\n math.sqrt((1.0/8.0) * linalg.determinant(invU + invV)) )", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def c_matrix(x1,x2,x3):\n\tC = np.array([\t[\t2*(x2-x1), \t\t(x2-x1), \t\t\t0\t\t\t], \\\n\t\t\t\t\t[\t(x2-x1), \t\t2*(x3-x1), \t\t(x3-x2)\t\t], \\\n\t\t\t\t\t[\t0,\t\t\t\t(x3-x2),\t\t2*(x3-x2)\t] \t], \\\n\t\t\t\t\tfloat)\n\treturn(C)", "def cov(zc):\n s, NM = zc.shape\n C = np.dot(zc, zc.T.conj()) / NM\n\n return C.real # C should be real, as C = A A^T and A is real", "def _calc_ucf_fuc(self):\n ucf_mat = self._ucf_mat\n fuc_mat = self._fuc_mat\n tmp1 = self._tmp1\n c_layers = self._c_layers\n f_layers = self._f_layers\n depth, n = self.num_cnots, self._num_qubits\n\n # tmp1 = U^dagger.\n np.conj(self.target_matrix.T, out=tmp1)\n\n # ucf_mat = fuc_mat = U^dagger @ C = U^dagger @ C_{depth-1} @ ... @ C_{0}.\n self._ucf_mat.set_matrix(tmp1)\n for q in range(depth - 1, -1, -1):\n ucf_mat.mul_right_q2(c_layers[q], temp_mat=tmp1, dagger=False)\n fuc_mat.set_matrix(ucf_mat.finalize(temp_mat=tmp1))\n\n # fuc_mat = F @ U^dagger @ C = F_{n-1} @ ... @ F_{0} @ U^dagger @ C.\n for q in range(n):\n fuc_mat.mul_left_q1(f_layers[q], temp_mat=tmp1)\n\n # ucf_mat = U^dagger @ C @ F = U^dagger @ C @ F_{n-1} @ ... 
@ F_{0}.\n for q in range(n - 1, -1, -1):\n ucf_mat.mul_right_q1(f_layers[q], temp_mat=tmp1, dagger=False)", "def calculate_abc(self):\n (a,b,c, V) = crystal_calc.make_lattice_vectors(self.lattice_lengths, self.lattice_angles)\n #Now rotate all these vectors by the U matrix\n self.a = np.dot(self.u_matrix, a).reshape(1,3)\n self.b = np.dot(self.u_matrix, b).reshape(1,3)\n self.c = np.dot(self.u_matrix, c).reshape(1,3)", "def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z", "def compute_factors(self):\n self.O = np.zeros((self.u, self.k))\n self.A = np.zeros((self.v, self.k))\n self.C = np.zeros((self.n, self.k))\n\n for i in range(self.k):\n for j in range(self.u):\n self.O[j, i] = (\n (self.Nku[i, j] + self.alpha)\n / (self.Nu[j] + self.alpha * self.k))\n for j in range(self.v):\n self.A[j, i] = (\n (self.Nkv[i, j] + self.beta)\n / (self.Nk[i] + self.v * self.beta))\n for j in range(self.n):\n self.C[j, i] = (\n (self.Nkn[i, j] + self.gamma)\n / (self.Nk[i] + self.n * self.gamma))\n\n # print(self.O.sum(axis=1))\n # print(self.A.sum(axis=0))\n # print(self.C.sum(axis=0))\n return self.O, self.A, self.C", "def _C(self):\n\n # Find the local x and y coordinates at each node\n xi = 0\n yi = 0\n xj = self.width()\n yj = 0\n xm = xj\n ym = self.height()\n xn = 0\n yn = ym\n\n # Calculate the [C] coefficient matrix\n C = array([[1, xi, yi, xi**2, xi*yi, yi**2, xi**3, xi**2*yi, xi*yi**2, yi**3, xi**3*yi, xi*yi**3],\n [0, 0, 1, 0, xi, 2*yi, 0, xi**2, 2*xi*yi, 3*yi**2, xi**3, 3*xi*yi**2],\n [0, -1, 0, -2*xi, -yi, 0, -3*xi**2, -2*xi*yi, -yi**2, 0, -3*xi**2*yi, -yi**3],\n \n [1, xj, yj, xj**2, xj*yj, yj**2, xj**3, xj**2*yj, xj*yj**2, yj**3, xj**3*yj, xj*yj**3],\n [0, 0, 1, 0, xj, 2*yj, 0, xj**2, 2*xj*yj, 3*yj**2, xj**3, 3*xj*yj**2],\n [0, -1, 0, -2*xj, -yj, 0, -3*xj**2, -2*xj*yj, -yj**2, 0, -3*xj**2*yj, -yj**3],\n\n [1, xm, ym, xm**2, xm*ym, ym**2, xm**3, xm**2*ym, xm*ym**2, ym**3, xm**3*ym, xm*ym**3],\n [0, 0, 1, 0, xm, 2*ym, 0, xm**2, 2*xm*ym, 3*ym**2, xm**3, 3*xm*ym**2],\n [0, -1, 0, -2*xm, -ym, 0, -3*xm**2, -2*xm*ym, -ym**2, 0, -3*xm**2*ym, -ym**3],\n\n [1, xn, yn, xn**2, xn*yn, yn**2, xn**3, xn**2*yn, xn*yn**2, yn**3, xn**3*yn, xn*yn**3],\n [0, 0, 1, 0, xn, 2*yn, 0, xn**2, 2*xn*yn, 3*yn**2, xn**3, 3*xn*yn**2],\n [0, -1, 0, -2*xn, -yn, 0, -3*xn**2, -2*xn*yn, -yn**2, 0, -3*xn**2*yn, -yn**3]])\n \n # Return the coefficient matrix\n return C", "def mulc(self, c):\n new = []\n for i in range(len(self.values)):\n new.append(self.values[i] * c)\n return Vec3(new[0], new[1], new[2])", "def coriolis_matrix(self):\n dot1 = squeeze(asarray(dot(self.M[0:3, 0:3], self.v[0:3]) + dot(self.M[0:3, 3:6], self.v[3:6])))\n dot2 = squeeze(asarray(dot(self.M[3:6, 0:3], self.v[0:3]) + dot(self.M[3:6, 3:6], self.v[3:6])))\n s1 = self.s(dot1)\n s2 = self.s(dot2)\n c = zeros((6, 6))\n c[0:3, 3:6] = -s1\n c[3:6, 0:3] = -s1\n c[3:6, 3:6] = -s2\n return c", "def CalcCoriolisMatrix(self):\n q = 
self.plant.GetPositions(self.context)\n v = self.plant.GetVelocities(self.context)\n\n def Cv_fcn(v):\n self.plant_autodiff.SetPositions(self.context_autodiff, q)\n self.plant_autodiff.SetVelocities(self.context_autodiff, v)\n return self.plant_autodiff.CalcBiasTerm(self.context_autodiff)\n\n C = 0.5*jacobian(Cv_fcn,v)\n return C" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Solves Tx = b using the Levinson algorithm, where T is a positive-definite symmetric Toeplitz matrix and b is a real vector.
def levinson(r, b):
    n = len(b)
    y = zeros((n,))
    x = zeros((n,))

    # normalize the system so that the T matrix has diagonal of ones
    r_0 = r/r[0]
    b_0 = b/r[0]

    if n == 1:
        return b_0

    y[0] = -r_0[1]
    x[0] = b_0[0]
    beta = 1
    alpha = -r_0[1]
    for k in range(0, n-1):
        beta = (1 - alpha*alpha)*beta
        mu = (b_0[k+1] - dot(r_0[1:k+2], x[k::-1])) / beta
        x[0:k+1] = x[0:k+1] + mu*y[k::-1]
        x[k+1] = mu
        if k < n-2:
            alpha = -(r_0[k+2] + dot(r_0[1:k+2], y[k::-1]))/beta
            y[0:k+1] = y[0:k+1] + alpha * y[k::-1]
            y[k+1] = alpha
    return x
[ "def Backward_Euler_solver(func, mx, mt, L, T, kappa, u_0, u_T, bCond):\n x,_ = xt_points(mx, mt, L, T)\n u_j = U(func, x, L)\n u_jp1 = np.zeros(len(u_j))\n A_BE = tridiag_A(mx, mt, L, T, kappa)\n\n # Solve the PDE: loop over all time points\n for n in range(1, mt+1):\n # Backward Euler scheme in matrix form at inner mesh points\n #u_jp1 = np.linalg.solve(A_BE, u_j)\n u_jp1[1:-1] = ThomasSolver(A_BE, u_j[1:-1])\n # Boundary conditions\n u_jp1[0] = u_0; u_jp1[-1] = u_T\n # Update u_j\n u_j = u_jp1\n\n return x, u_j", "def SelfDualNewtonSystem(A, b, c, e):\n \n n = A.shape[1]\n m = A.shape[0]\n \n b_bar = b - np.matmul(A,e)\n c_bar = c - e\n alpha = 1 + np.dot(c, e)\n beta = n + 2\n \n A_star = np.c_[A,-b,b_bar]\n C = np.zeros((n+2,n+2))\n C[0:n,n] = c\n C[n,0:n] = -C[0:n,n].T\n C[0:n,n+1] = -c_bar\n C[n+1,0:n] = -C[0:n,n+1].T\n C[n,n+1] = alpha\n C[n+1,n] = -C[n,n+1].T\n \n yA = np.r_[np.zeros((m,m)), -A_star.T, np.zeros((n+2, m))]\n xA = np.r_[A_star, C, np.eye(n+2)]\n sA = np.r_[np.zeros((m, n+2)), -np.eye(n+2), np.eye(n+2)]\n \n return np.c_[yA, xA, sA]", "def stbinv(A, B, C, D, y, t):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[\n 1\n ] # the number of samples is the number of columns of the data matrix y\n\n # calculate system's dimensions: number of states and number of inputs\n m = B.shape[1] # number of inputs\n n = A.shape[0] # number of states\n\n # initialize the variable v (additional input)\n v = np.zeros((n, N)) # it will be important later\n\n # initializing the flag variable\n flag = 0\n # initializing the flag variable for the vrft method\n flag_vr = 0\n # initializing the counter of reduction steps done by the algorithm\n kround = 0\n\n # starting the loop of the reduction procedure\n while flag == 0:\n # run a step of the reduction order algorithm\n Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)\n # increase the counter of reductions\n kround = kround + 1\n\n # preallocating the state vector of the inverse system\n xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples\n # preallocating the calculated input\n uhat = np.zeros((m, N - kround))\n\n # defining the reduced time vector\n tt = t[:, 0 : N - kround]\n\n # test the conditions of invertibility\n if phat < m:\n # if this condition is true, then the algorithm has failed and it is not possible to find the inverse\n flag = 1\n flag_vr = 1\n # if this is the case, we print a message and end the execution\n # print('The inversion algorithm has failed')\n return uhat, tt, flag_vr\n else:\n if rhat == m:\n # ((rhat==m)&(rhat==phat)):\n # if this condition is true, then the algorithm is done. 
We can calculate the signal u\n flag = 2\n # calculating the inverse of the feedforward matrix\n # E=np.linalg.inv(Dhat)\n E = np.linalg.pinv(Dhat)\n else:\n # if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm\n A = Ahat\n B = Bhat\n C = Chat\n D = Dhat\n y = yhat\n v = vhat\n # after the reduction procedure is done, then the system can be inverted\n\n # calculating the dynamic matrix of the inverse system\n Ainv = Ahat - Bhat @ E @ Chat\n # eigenvalues of the inverse system's dynamic matrix\n wv, v = np.linalg.eig(Ainv) # w=eigenvalues, v=eigenvectors\n # calculating the input matrix of the inverse system\n Binv = Bhat @ E\n # calculating the output matrix of the inverse system\n Cinv = -E @ Chat\n # calculating the feedforward matrix of the inverse system\n Dinv = E\n\n # test if the inverse dynamic system is stable\n wbool = wv > 1\n wsum = np.sum(wbool)\n # test if wsum is greater than 1\n if wsum > 0:\n # if wsum is greater than 1, then, the inverse system is unstable, so we end the execution of the algorithm\n # print('The inverse system is unstable')\n flag_vr = 2\n return uhat, tt, flag_vr\n else:\n # if wsum=0, then the inverse system is stable, and we can calculate the input signal\n # calculate the first value for the output (t=0)\n uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]\n # calculate the states and the output of the inverse system\n for k in range(0, N - 1 - kround):\n xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]\n uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]\n\n return uhat, tt, flag_vr", "def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)", "def solve(matrix, b):\n lu_matrix = decompose_to_LU(matrix)\n # get supporting vector y\n y = np.matrix(np.zeros([lu_matrix.shape[0], 1]), dtype=np.float64)\n for i in range(y.shape[0]):\n y[i, 0] = b[i] - lu_matrix[i, :i] * y[:i]\n\n # get vector of answers x\n x = np.matrix(np.zeros([lu_matrix.shape[0], 1]))\n for i in range(1, x.shape[0] + 1):\n x[-i, 0] = (y[-i] - lu_matrix[-i, -i:] * x[-i:, 0]) / lu_matrix[-i, -i]\n\n return np.array(x.transpose()[0], dtype=np.float64)[0]", "def triangular_solve(rowlist, b):\n x = zero_vec(rowlist[0].D)\n for i in reversed(range(len(rowlist))):\n x[i] = (b[i] - rowlist[i] * x)/rowlist[i][i]\n return x", "def forward_committor_sensitivity(T, A, B, index):\n\n n = len(T)\n set_X = numpy.arange(n) # set(range(n))\n set_A = numpy.unique(A) # set(A)\n set_B = numpy.unique(B) # set(B)\n set_AB = numpy.union1d(set_A, set_B) # set_A | set_B\n notAB = numpy.setdiff1d(set_X, set_AB, True) # list(set_X - set_AB)\n m = len(notAB)\n\n K = T - numpy.diag(numpy.ones(n))\n\n U = K[numpy.ix_(notAB.tolist(), notAB.tolist())]\n\n v = numpy.zeros(m)\n\n # for i in xrange(0, m):\n # for k in xrange(0, len(set_B)):\n # v[i] = v[i] - K[notAB[i], B[k]]\n v[:] = v[:] - K[notAB[:], B[:]]\n\n 
qI = numpy.linalg.solve(U, v)\n\n q_forward = numpy.zeros(n)\n #q_forward[set_A] = 0 # double assignment.\n q_forward[set_B] = 1\n #for i in range(len(notAB)):\n q_forward[notAB[:]] = qI[:]\n\n target = numpy.eye(1, n, index)\n target = target[0, notAB]\n\n UinvVec = numpy.linalg.solve(U.T, target)\n Siab = numpy.zeros((n, n))\n\n for i in range(m):\n Siab[notAB[i]] = - UinvVec[i] * q_forward\n\n return Siab", "def ridge_regression(y, tx, lambda_):\n N = tx.shape[0]\n a = tx.T.dot(tx) + 2 * N * lambda_ * np.identity(tx.shape[1])\n b = tx.T.dot(y)\n w = np.linalg.solve(a, b)\n loss = compute_loss_LS(y, tx, w) \n return loss, w", "def SOR_Solve_Opt(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n omega = 1\n l = 5\n p = 2\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n x_new[row] -= A[row,column]*x_new[column]\n x_new[row] /= A[row,row]\n x_new[row] = (1.0-omega) * x[row] + omega*x_new[row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n #record change after iteration k\n if (l==iteration):\n dxl = np.linalg.norm(x_new-x)\n if (l + p == iteration):\n dxlp = np.linalg.norm(x_new-x)\n omega = 2.0/(1.0+np.sqrt(1-(dxlp/dxl)**(1.0/p)))\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def _solveX(L, U, b):\n m, n = L.shape\n # Forward Substitution\n y = list()\n y.insert(0, b[0]/L[0][0])\n for i in range(1, m):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*y[k]\n y.insert(i, (b[i]-summ)/(L[i][i]))\n\n # Backwards Substitution\n x = [0]*m\n x[m-1] = y[m-1] / U[m-1][m-1]\n for i in range(m - 2, -1, -1):\n summ = 0\n for k in range(i+1, n):\n summ += U[i][k]*x[k]\n x[i] = (y[i] - summ)/U[i][i]\n\n return x", "def solve_lower_triangular(amat, b):\n return solve_triangular_base(amat, b, lower=True)", "def project_L1_ball(x: \"fasta.linalg.Vector\", t: float) -> \"fasta.linalg.Vector\":\n # By Moreau's identity, we convert to proximal of dual problem (L-inf norm)\n return x - project_Linf_ball(x, t)", "def solve_L(L, b):\n\n raise NotImplementedError", "def linear_problem(A, b, eps=0.0001):\n\n M = A + eps * speye_like(A)\n return torch.solve(b if b.ndimension() >= 2 else torch.unsqueeze(b, -1),\n M if isdense(M) else M.to_dense())[0].squeeze()", "def lu_solve(A, b):\n return A.from_ddm(A.to_ddm().lu_solve(b.to_ddm()))", "def find_argmin_T(p_s, p_t, A_d,\n A, b):\n def f_error(x):\n A_tmp = np.reshape(x[0:9], newshape=(3,3))\n b_tmp = x[9:12]\n return(find_error(p_s, p_t, A_d,\n A_tmp, b_tmp))\n def flatten(A, b):\n # Flatten out A and b into x_0\n return(np.concatenate((np.reshape(A, newshape=(9,)), b)))\n x_0 = flatten(A, b)\n #sol = optimize.root(f_error, x_0, method='lm')\n print(\"minimizing the function now!!!\")\n sol = optimize.minimize(f_error, x_0)\n def expand(x):\n # Un-flattens x into the tuple of A and b\n return(np.reshape(x[0:9], newshape=(3,3)), x[9:12])\n\n A_tmp, b = expand(sol.x)\n print(\"==============\")\n print(\"A_tmp, before we make it near orthogonal\")\n print(A_tmp)\n print(\"its determinant\")\n print(np.linalg.det(A_tmp))\n print(\"==============\")\n #print(\"\")\n A = near_orthog(A_tmp)\n return(A, b)", "def 
compute_twist(rbt):\n #YOUR CODE HERE\n R = rbt[:3,:3]\n orientation = eqf.find_omega_theta(R)# omega/theta\n v = eqf.find_v(orientation[0], orientation[1], trans).reshape(3,)\n return (v, orientation[0])", "def RHS(y,t):\r\n\r\n return np.multiply(A.dot(y),ones-y)-beta*y", "def eqp_kktfact(H, c, A, b):\n n, = np.shape(c) # Number of parameters\n m, = np.shape(b) # Number of constraints\n\n # Karush-Kuhn-Tucker matrix of coefficients.\n # Defined as in Nocedal/Wright \"Numerical\n # Optimization\" p.452 in Eq. (16.4).\n kkt_matrix = csc_matrix(bmat([[H, A.T], [A, None]]))\n # Vector of coefficients.\n kkt_vec = np.hstack([-c, -b])\n\n # TODO: Use a symmetric indefinite factorization\n # to solve the system twice as fast (because\n # of the symmetry).\n lu = linalg.splu(kkt_matrix)\n kkt_sol = lu.solve(kkt_vec)\n x = kkt_sol[:n]\n lagrange_multipliers = -kkt_sol[n:n+m]\n\n return x, lagrange_multipliers" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the log determinant of a positive-definite symmetric Toeplitz matrix. The determinant is computed recursively. The intermediate solutions of the Levinson recursion are exploited.
def toeplitz_slogdet(r):
    n = len(r)
    r_0 = r[0]

    r = np.concatenate((r, np.array([r_0])))
    r /= r_0  # normalize the system so that the T matrix has diagonal of ones

    logdet = n*np.log(np.abs(r_0))
    sign = np.sign(r_0)**n

    if n == 1:
        return (sign, logdet)

    # now on is a modification of Levinson algorithm
    y = zeros((n,))
    x = zeros((n,))

    b = -r[1:n+1]
    r = r[:n]

    y[0] = -r[1]
    x[0] = b[0]
    beta = 1
    alpha = -r[1]

    d = 1 + dot(-b[0], x[0])
    sign *= np.sign(d)
    logdet += np.log(np.abs(d))

    for k in range(0, n-2):
        beta = (1 - alpha*alpha)*beta
        mu = (b[k+1] - dot(r[1:k+2], x[k::-1])) / beta
        x[0:k+1] = x[0:k+1] + mu*y[k::-1]
        x[k+1] = mu

        d = 1 + dot(-b[0:k+2], x[0:k+2])
        sign *= np.sign(d)
        logdet += np.log(np.abs(d))

        if k < n-2:
            alpha = -(r[k+2] + dot(r[1:k+2], y[k::-1]))/beta
            y[0:k+1] = y[0:k+1] + alpha * y[k::-1]
            y[k+1] = alpha

    return (sign, logdet)
[ "def fast_logdet(matrix):\n sign, ld = np.linalg.slogdet(matrix)\n if not sign > 0:\n return -np.inf\n return ld", "def log_abs_det_jacobian(self, z):\n pre_u = self.u_ + self.u\n pre_w = self.w_ + self.w\n a = F.softplus(self.a + self.inv)\n w = F.softmax(pre_w, dim=3)\n u = F.softmax(pre_u, dim=3)\n # Perform computation\n pre_sigm = torch.sum(u * a * z, 3) + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = torch.sum(w * sigm, dim=3)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n logj = F.log_softmax(pre_w, dim=3) + logsigmoid(pre_sigm) + logsigmoid(-pre_sigm) + torch.log(a)\n # n, d, d2, dh\n logj = logj + F.log_softmax(pre_u, dim=3)\n # n, d, d2, dh, d1\n logj = torch.log(torch.sum(torch.exp(logj),3))\n # n, d, d2, d1\n logdet_ = logj + np.log(1 - self.eps) - (torch.log(x_pre_clipped) + torch.log(-x_pre_clipped + 1))\n return logdet_", "def log_det(chol):\n return 2. * tf.reduce_sum(tf.log(tf.matrix_diag_part(chol)), [-1])", "def log_abs_det_jacobian(self, x, y):\n return torch.ones(x.size()[:-1], dtype=x.dtype, layout=x.layout, device=x.device) * \\\n self.scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1).sum(-1)", "def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):\n if covariance_type == 'full':\n n_components, _, _ = matrix_chol.shape\n log_det_chol = (np.sum(np.log(\n matrix_chol.reshape(\n n_components, -1)[:, ::n_features + 1]), 1))\n\n elif covariance_type == 'tied':\n log_det_chol = (np.sum(np.log(np.diag(matrix_chol))))\n\n elif covariance_type == 'diag':\n log_det_chol = (np.sum(np.log(matrix_chol), axis=1))\n\n else:\n log_det_chol = n_features * (np.log(matrix_chol))\n\n return log_det_chol", "def _inverse_log_det_jacobian(self, x):\n alpha, beta = self._get_alpha_beta()\n diff = x - self.x0\n r = tf.linalg.norm(diff, axis=-1, keepdims=True)\n h = 1. 
/ (alpha + r)\n h_prime = -(h ** 2)\n beta_h = beta * h\n log_det_jacobian = tf.reduce_sum(\n (self.dim - 1) * tf.math.log1p(beta_h)\n + tf.math.log1p(beta_h + beta * h_prime * r), axis=-1)\n return log_det_jacobian", "def lemma_logdet(B, M, yivars, L):\n \n S = np.dot(M.T * yivars, M) * L\n d = np.einsum('ii->i', S)\n d += 1\n \n s, logdetS = np.linalg.slogdet(S)\n assert s==1\n \n logdetC_fast = np.sum(np.log(yivars**-1))\n #s, logdetC = np.linalg.slogdet(np.diag(yivars**-1))\n #assert s==1\n #print(logdetC, logdetC_fast, np.allclose(logdetC, logdetC_fast))\n \n logdetB = logdetS + logdetC_fast\n \n return logdetB", "def _gradLogDet(self, hyperparams,dK,columns =False ):\n KV = self.get_covariances(hyperparams)\n Si = KV['Si']\n if columns:\n d=(KV['Uc']*SP.dot(dK,KV['Uc'])).sum(0)\n RV = SP.dot(KV['Sr'],SP.dot(Si,d))\n if VERBOSE:\n #kd = SP.kron(KV['Sr'],d)\n kd = krondiag(KV['Sr'],d)\n #kd = krondiag_(KV['Sr'],d)\n RV_=SP.sum(kd*Si)\n check_dist(RV,RV_)\n else:\n #d=SP.dot(KV['Ur'].T,SP.dot(dK,KV['Ur'])).diagonal()\n d=(KV['Ur']*SP.dot(dK,KV['Ur'])).sum(0)\n RV = SP.dot(d,SP.dot(Si,KV['Sc']))\n if VERBOSE:\n #kd = SP.kron(d,KV['Sc'])\n kd=krondiag(d,KV['Sc'])\n #kd=krondiag_(d,KV['Sc'])\n RV_=SP.sum(kd*Si)\n check_dist(RV,RV_)\n return RV", "def plogdet(K):\n egvals = eigvalsh(K)\n return npsum(log(egvals[egvals > epsilon]))", "def determinant(A):\r\n # This is Crout's Algorithm.\r\n # U will remain zero in the lower left entries, and will be 1's along the diagonal.\r\n # L will remain zero in the upper right entries.\r\n # A = U L\r\n n = len(A)\r\n L = zeros(n) # Initialize with zeros Numerators for the lower triangular matrix\r\n U = zeros(n) # Initialize with zeros Numerators for the upper triangular matrix\r\n DL = ones(n) # Initialize with zeros Denominators for the lower triangular matrix\r\n DU = ones(n) # Initialize with zeros Denominators for the upper triangular matrix\r\n # L = [[0] * n] * n # Does not work because it initializes the matrix with references to the same lists\r\n # U = [[0] * n] * n # Does not work because it initializes the matrix with references to the same lists\r\n for j in range(0, n):\r\n assert len(A[j]) == n\r\n U[j][j] = 1 # set the diagonal entries of U to 1\r\n for i in range(j, n): # starting at L[j][j], solve j-th column of L\r\n tempL = A[i][j]\r\n tempDL = 1 # Temporary denominator for the lower triangular matrix\r\n for k in range(0, j):\r\n assert DL[i][k] != 0\r\n assert DU[k][j] != 0\r\n tempL = tempL * DL[i][k] * DU[k][j] - tempDL * L[i][k] * U[k][j]\r\n tempDL = tempDL * DL[i][k] * DU[k][j]\r\n L[i][j] = tempL\r\n DL[i][j] = tempDL\r\n for i in range(j + 1, n):# starting at U[j][j+1], solve j-th row of U\r\n tempU = A[j][i]\r\n tempDU = 1 # Temporary denominator for the upper triangular matrix\r\n for k in range(0, j):\r\n assert DU[k][i] != 0\r\n assert DL[j][k] != 0\r\n tempU = tempU * DU[k][i] * DL[j][k] - tempDU * L[j][k] * U[k][i]\r\n tempDU = tempDU * DU[k][i] * DL[j][k]\r\n U[j][i] = tempU * DL[j][j]\r\n if L[j][j] == 0:\r\n assert simplistic_determinant(A) == 0\r\n return 0 # The determinant is zero, so avoid dividing by zero by short circuiting the computation.\r\n DU[j][i] = tempDU * L[j][j]\r\n\r\n # Now calculate the determinant by multiplying the diagonal entries of the lower-left triangular matrix\r\n num = 1\r\n den = 1\r\n for i in range(0, n):\r\n assert U[i][i] == 1\r\n for j in range(0, i):\r\n assert U[i][j] == 0\r\n for j in range(i + 1, 3):\r\n assert L[i][j] == 0\r\n num *= L[i][i]\r\n den *= DL[i][i]\r\n # Now 
divide the denominator den from the numerator.\r\n # The numerator should evenly divide (assuming the input matrix A only had ints),\r\n # Was having trouble with den equaling 0. Fixed it by returning zero if any L[j][j] was 0. See above.\r\n assert den != 0\r\n det1 = num // den\r\n det2 = simplistic_determinant(A)\r\n if det1 != det2:\r\n print(\"Mismatch! \", det1, det2)\r\n return det2", "def determinant(self):\n if self.n_rows != self.n_cols:\n raise Exception('Matrix is not square')\n if self.n_rows == 2:\n return (self.data[0][0] * self.data[1][1]) - (self.data[1][0] * self.data[0][1])\n else:\n echelon, ops = reduce_to_echelon(self.data.copy(), True)\n swaps = sum([1 if row[0] == 'swap' else 0 for row in ops])\n return math.prod([echelon[i][i] for i in range(len(echelon))]) * (-1) ** swaps", "def determinant(self):\n r, s = self.signature_pair_of_matrix()\n return (-1)**s*prod([ G.determinant() for G in self._local_symbols ])", "def determinant(A):\n \n total = 0\n\n if len(A) == 1:\n return A[0][0]\n\n for col in range(len(A)):\n Asub = A[1:]\n for j in range(len(A)-1):\n Asub[j] = Asub[j][:col] + Asub[j][col+1:]\n subdet = determinant(Asub)\n sign = (-1) ** (col % 2)\n total += sign * A[0][col] * subdet\n return total", "def determinant(A):\n \n total = 0\n\n if len(A) == 1:\n return A[0][0]\n\n for col in range(len(A)):\n Asub = A[1:]\n for j in range(len(A)-1):\n Asub[j] = Asub[j][:col] + Asub[j][col+1:]\n subdet = determinant(Asub)\n sign = (-1) ** (col % 2)\n total += sign * A[0][col] * subdet\n\n return total", "def det(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = A[:]\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0:\n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1, n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, one element at a time.\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n\n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... 
product of diagonals is determinant\n\n return product", "def task5_determinant(matrix):\n return np.linalg.det(matrix)", "def determinant(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(NotImplementedError, \"Calculating determinant not implemented for matrices largerer than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n return self.g[0][0] # a 1x1 matrix\n else:\n return ((self.g[0][0] * self.g[1][1]) - (self.g[0][1] * self.g[1][0])) # a 2x2 matrix\n # TODO - your code here", "def logit_deriv(y):\n# if y.any() < 0.0 or y.any() > 1.0:\n# raise Exception\n\n return y*(1-y)", "def determinant(self):\n if not self.is_square():\n raise ValueError('Matrix must be square')\n tmp_matrix = list(map(list, deepcopy(self.data))) # change tuples in data to list, for make them mutable\n # bring the tmp_matrix into a triangular form\n for index in range(self._width):\n for i in range(index + 1, self._width):\n if tmp_matrix[index][index] == 0: # if diagonal element equal zero, change it to approximately zero\n tmp_matrix[index][index] = 1.0e-14 # in other case will be ZeroDivisionError\n scalar = tmp_matrix[i][index] / tmp_matrix[index][index]\n for j in range(self._height):\n tmp_matrix[i][j] = tmp_matrix[i][j] - scalar * tmp_matrix[index][j]\n # the determinant equal to the product of diagonal elements of the triangular matrix\n result = 1\n for index in range(self._width):\n result *= tmp_matrix[index][index]\n # need to approximate the result to avoid the tails of the product of floating point numbers\n return round(result, 10)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Preprocessing needed for toeplitz_inverse_multiplication()
def toeplitz_inverse_multiplication_prep(T_column):

    phi = 1
    psi = 2
    assert phi != 0
    assert psi != 0
    assert phi != psi

    n = len(T_column)

    x = levinson(T_column, np.concatenate((np.array([1]), np.zeros((n-1,)))))
    y = levinson(T_column, np.concatenate((np.zeros((n-1,)), np.array([1]))))

    x_0 = x[0]

    D_phi = (phi**(1/n))**np.arange(0, n)
    D_psi = (psi**(1/n))**np.arange(0, n)

    Lambda_1 = fft(D_psi*x)
    Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))
    Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))
    Lambda_4 = fft(D_phi*x)

    return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)
[ "def transformPreMultiply(*args):\n return _almathswig.transformPreMultiply(*args)", "def bd_toeplitz_inverse_multiplication(u, *arrs):\n \n y = zeros(shape(u))\n n_start = 0\n n_end = 0\n for t in arrs:\n n_start = n_end\n n_end += len(t[3]) # len(t[3]) is the length of the block\n y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)\n assert len(y) == n_end\n return y", "def comp_inverse_un(switch,nbTermes,maxInt,precision):\n P = poly_random(switch,nbTermes,maxInt)\n P = P - P(0) + P.parent().one()\n return inverse_un_series(P,precision)", "def de_mult(self,z):\n if isinstance(z,np.ndarray) and z.size>1:\n assert np.all(np.diff(z)>0.)\n return (z+1.)**(3.*(1.+self.w))", "def transform(p, m):\n return (p[0] * m[0] + p[1] * m[4] + p[2] * m[8] + m[12],\n p[0] * m[1] + p[1] * m[5] + p[2] * m[9] + m[13],\n p[0] * m[2] + p[1] * m[6] + p[2] * m[10] + m[14])", "def reconstruct(Pf, Pt, Pz):\n return np.matmul(np.transpose(Pt), Pz[:, np.newaxis] * Pf)", "def _build_m_z_inv(self):\n\n M = self.M_z.copy()\n\n self.M_z_inv = lin.inv(M).tocsc()", "def revert_output_preprocessing(self, output):\n return np.exp(output)", "def test_preprocessing_unrotate(self):\n pass", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def test__inverse_transform_continuous(self):", "def complex_inverse(c1,cr):", "def inverse_fisher_z_transform(z):\r\n return ((e ** (2 * z)) - 1.) / ((e ** (2 * z)) + 1.)", "def find_mult_inverse(self, p):\n for i in self.table:\n prod = self.mult(p,i)\n while prod and prod[-1] == 0:\n prod.pop()\n if prod == [1]:\n return self.simplified(i)\n break\n return None", "def sbf_inverse_uniprocessor(x):\n return x", "def inf_zero_one_to_triple(p,q,r):\n ### infinity = [1,0], zero = [0,1], one = [1,1] in CP^1\n p1,p2=p\n q1,q2=q\n r1,r2=r\n M = [[p1,q1],[p2,q2]]\n Minv = matrix2_inv(M)\n [mu,lam] = matrix_mult_vector(matrix2_inv([[p1,q1],[p2,q2]]), [r1,r2])\n return [[mu*p1, lam*q1],[mu*p2, lam*q2]]", "def u_inverse(U, check=False, verbose=False):\n\n m, n = U.shape\n\n if verbose:\n print(\"u_inverse\")\n print(shortstr(U))\n\n #items = []\n leading = []\n for row in range(m):\n #cols = numpy.where(U[row, :])[0]\n cols = U.get_cols(row)\n #print(\"row %d, cols %s\"%(row, cols))\n if not len(cols):\n break\n col = cols[0]\n assert U[row, col]\n leading.append(col)\n\n #print(\"leading:\", leading)\n assert sorted(leading) == leading\n assert len(set(leading)) == len(leading)\n\n U1 = zeros(n, m)\n\n #print( shortstr(U))\n\n # Work backwards\n i = len(leading)-1 # <= m\n while i>=0:\n\n j = leading[i]\n #print( \"i=%d, j=%d\"%(i, j))\n r = Fraction(1, U[i, j])\n U1[j, i] = r\n\n #print( \"U, U1, U*U1:\")\n #print( shortstrx(U, U1, dot(U, U1)))\n\n k = i-1\n while k>=0:\n #print( \"dot\")\n #print( shortstr(U[k,:]))\n #print( shortstr(U1[:,i]))\n r = dot(U[k, :], U1[:, i])\n #print( \"=\", r)\n if r != 0:\n j = leading[k]\n s = U[k, j]\n #print( \"set\", j, i)\n U1[j, i] = -Fraction(r, s)\n #print( shortstr(U1[:,i]))\n assert dot(U[k, :], U1[:, i]) == 0\n k -= 1\n i -= 1\n\n return U1", "def preprocess_input(input_pixels):\n\n input_pixels[:, 1] /= 31.0\n input_pixels[:, 2] += 1.0\n input_pixels[:, 2] /= 2.2\n return input_pixels", "def mul_inplace(a, b):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Matrix multiplication with the inverse of a block-diagonal matrix having Toeplitz blocks, y = T u. Analogous to toeplitz_inverse_multiplication().
def bd_toeplitz_inverse_multiplication(u, *arrs):

    y = zeros(shape(u))
    n_start = 0
    n_end = 0
    for t in arrs:
        n_start = n_end
        n_end += len(t[3])  # len(t[3]) is the length of the block
        y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)
    assert len(y) == n_end
    return y
[ "def chol_inverse_diag(t):\n (uu, nrows) = t.shape\n B = np.zeros((uu, nrows), dtype=\"float64\")\n B[1, nrows - 1] = 1.0 / t[1, nrows - 1] ** 2\n B[0, nrows - 1] = -t[0, nrows - 1] * B[1, nrows - 1] / t[1, nrows - 2]\n for j in reversed(range(nrows - 1)):\n tjj = t[1, j]\n B[1, j] = (1.0 / tjj - t[0, j + 1] * B[0, j + 1]) / tjj\n B[0, j] = -t[0, j] * B[1, j] / t[1, j - 1]\n return B", "def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)", "def inverse_matrice(T):\n a,b,c,d = T[0][0],T[0][1],T[1][0],T[1][1]\n det = a*d-b*c\n aa,bb,cc,dd = d/det,-b/det,-c/det,a/det\n Tinv = [[aa,bb],[cc,dd]]\n return Tinv", "def test_BlockMatrix_2x2_inverse_numeric():\n M = Matrix([[1, 2], [3, 4]])\n # rank deficient matrices that have full rank when two of them combined\n D1 = Matrix([[1, 2], [2, 4]])\n D2 = Matrix([[1, 3], [3, 9]])\n D3 = Matrix([[1, 4], [4, 16]])\n assert D1.rank() == D2.rank() == D3.rank() == 1\n assert (D1 + D2).rank() == (D2 + D3).rank() == (D3 + D1).rank() == 2\n\n # Only A is invertible\n K = BlockMatrix([[M, D1], [D2, D3]])\n assert block_collapse(K.inv()).as_explicit() == K.as_explicit().inv()\n # Only B is invertible\n K = BlockMatrix([[D1, M], [D2, D3]])\n assert block_collapse(K.inv()).as_explicit() == K.as_explicit().inv()\n # Only C is invertible\n K = BlockMatrix([[D1, D2], [M, D3]])\n assert block_collapse(K.inv()).as_explicit() == K.as_explicit().inv()\n # Only D is invertible\n K = BlockMatrix([[D1, D2], [D3, M]])\n assert block_collapse(K.inv()).as_explicit() == K.as_explicit().inv()", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def inverse(self) -> \"Matrix\":\n new_mtx = Matrix(self._nrows, self._ncols)\n # step 1. Create a matrix of minors, taking the determinants of the submatrices\n for row in range(self._nrows):\n for col in range(self._ncols):\n val = self.get_sub_matrix(row, col).get_determinant()\n # step 2. Apply the checkerboard of alternating signs to the matrix values\n val *= ((-1) ** (row + col))\n new_mtx.set(row, col, val)\n # step 3. Transpose the matrix\n new_mtx = new_mtx.transpose()\n # step 4. Multiply the matrix by (1/d), where 'd' is the original matrix's determinant\n d = self.get_determinant()\n if d == 0:\n raise MatrixHasNoInverseError()\n return new_mtx.scale(1 / d)", "def inverse(self):\n inverse = []\n inverse_row = [] \n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n if self.h == 1:\n inverse_row . append(1/float(self.g[0][0]))\n inverse . 
append (inverse_row)\n return Matrix(inverse)\n \n # TODO - your code here\n if self.h == 2:\n # Intializing variable to calculate determination\n a = self.g[0][0]\n b = self.g[0][1]\n c = self.g[1][0]\n d = self.g[1][1]\n det_A = (a * d) - (b * c)\n \n # Intializing variable to implement[[d, -b], [-c, a]]\n self.g[0][0] = d\n self.g[0][1] = - b\n self.g[1][0] = - c\n self.g[1][1] = a\n \n # For loop to calculate the inverse \n for i in range(len(self.g)):\n inverse_row = []\n for j in range(len(self.g[0])):\n inverse_row . append ((1 / float(det_A)) * self.g[i][j])\n inverse . append(inverse_row) \n return Matrix(inverse)", "def star_inverse(m):\n if (m.shape[0] != m.shape[1]):\n raise ValueError(\"m must be a square matrix! \")\n return star(np.eye(m.shape[0]) - m)", "def inverse(self):\n ((c, ms, x),(s, c2, y), (z1, z2, o)) = self.matrix\n return Transform([[c, s, (-c*x)-(s*y)],\n [-s, c, (s*x)-(c*y)],\n [0, 0, 1]])", "def u_inverse(U, check=False, verbose=False):\n\n m, n = U.shape\n\n if verbose:\n print(\"u_inverse\")\n print(shortstr(U))\n\n #items = []\n leading = []\n for row in range(m):\n #cols = numpy.where(U[row, :])[0]\n cols = U.get_cols(row)\n #print(\"row %d, cols %s\"%(row, cols))\n if not len(cols):\n break\n col = cols[0]\n assert U[row, col]\n leading.append(col)\n\n #print(\"leading:\", leading)\n assert sorted(leading) == leading\n assert len(set(leading)) == len(leading)\n\n U1 = zeros(n, m)\n\n #print( shortstr(U))\n\n # Work backwards\n i = len(leading)-1 # <= m\n while i>=0:\n\n j = leading[i]\n #print( \"i=%d, j=%d\"%(i, j))\n r = Fraction(1, U[i, j])\n U1[j, i] = r\n\n #print( \"U, U1, U*U1:\")\n #print( shortstrx(U, U1, dot(U, U1)))\n\n k = i-1\n while k>=0:\n #print( \"dot\")\n #print( shortstr(U[k,:]))\n #print( shortstr(U1[:,i]))\n r = dot(U[k, :], U1[:, i])\n #print( \"=\", r)\n if r != 0:\n j = leading[k]\n s = U[k, j]\n #print( \"set\", j, i)\n U1[j, i] = -Fraction(r, s)\n #print( shortstr(U1[:,i]))\n assert dot(U[k, :], U1[:, i]) == 0\n k -= 1\n i -= 1\n\n return U1", "def right_inverse(mat):\n return mat.T @ np.linalg.inv(mat @ mat.T)", "def inv(transform_matrix):\n\n r = transform_matrix[0:3, 0:3]\n t = transform_matrix[0:3, 3]\n t_inv = -1 * r.T.dot(t)\n transform_inv = np.eye(4)\n transform_inv[0:3, 0:3] = r.T\n transform_inv[0:3, 3] = t_inv\n\n return transform_inv", "def inverse(self):\n\t\tif not self.is_square():\n\t\t\traise(ValueError, \"Non-square Matrix does not have an inverse.\")\n\t\tif self.row_len > 2:\n\t\t\traise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\t\n\t\t# TODO - your code here\n\t\tinverse = [] \n\t\tif self.row_len == 1: \n\t\t\tinverse.append([1 / self.g[0][0]])\n\t\telif self.row_len == 2: \n\t\t\t# If the matrix is 2x2, check that the matrix is invertible\n\t\t\tif self.g[0][0] * self.g[1][1] == self.g[0][1] * self.g[1][0]:\n\t\t\t\traise ValueError('The matrix is not invertible.')\n\t\t\telse:\n\t\t\t\tdet = self.determinant() \n\t\t\t\ta = self.g[0][0]\n\t\t\t\tb = self.g[0][1]\n\t\t\t\tc = self.g[1][0]\n\t\t\t\td = self.g[1][1]\n\t\t\t\tinverse = [[d, -b],[-c, a]]\n\n\t\t\t\tfor i in range(len(inverse)):\n\t\t\t\t\tfor j in range(len(inverse[0])):\n\t\t\t\t\t\tinverse[i][j] = det * inverse[i][j]\n\t\t\t\t\n\t\treturn Matrix(inverse)", "def block_inverse(iA, B, C, D):\n logger = logging.getLogger(__name__)\n logger.info(\"------- block_inverse(iA, B, C, D) -------\")\n logger.debug(\"Input matrix types are: {} {} {} {}\".format(type(iA), type(B),\n type(C), type(D)))\n 
logger.debug(\"Input matrix shapes are: {} {} {} {}\".format(\n np.array(iA).shape,\n np.array(B).shape,\n np.array(C).shape,\n np.array(D).shape))\n n = iA.shape[0]\n result = np.zeros([n+1, n+1])\n CA = np.dot(C, iA).reshape([1,n])\n AB = np.dot(iA, B).reshape([n,1])\n det = D-CA.dot(B)\n result[:n,:n] = iA + AB.dot(CA) / det\n result[n:n+1,:n] = -CA / det\n result[:n,n:n+1] = -AB / det\n result[n,n] = 1 / det\n return result", "def mulI( self ):\n return Mat( -self.m11, self.m12, self.m13,\n self.m21, -self.m22, self.m23,\n self.m31, self.m32, -self.m33 )", "def inverse(self):\n if self.determinant() != 0:\n ops = reduce_to_red_echelon(self.data.copy(), True)[1]\n matrix = identity_matrix(self.n_rows).data\n \n if ops:\n if isinstance(ops[0], str):\n ops = [ops]\n \n for op in ops:\n if op[0] == 'swap':\n matrix = row_swap(matrix, op[1], op[2])\n elif op[0] == 'multiplication':\n matrix = row_multiply(matrix, op[1], op[2])\n elif op[0] == 'subtract':\n matrix = row_subtract(matrix, op[1], op[2], op[3])\n else:\n raise ValueError('Row operation not recognized')\n else:\n raise ValueError('Matrix has a determinant of 0 and is not invertible')\n return Matrix(matrix)", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def inv(mat):\n return mat.inv()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a single line of csvtoarrow output. Raise RuntimeError if a line cannot be parsed. (We can't recover from that because we don't know what's happening.)
def _parse_csv_to_arrow_warning(line: str) -> I18nMessage: for pattern, builder in _ERROR_PATTERNS: match = pattern.match(line) if match: return builder(**match.groupdict()) raise RuntimeError("Could not parse csv-to-arrow output line: %r" % line)
[ "def __parse_csv_line(self, csv_line):\n if Case.label_column == -1:\n raise Exception(\"Cannot parse CSV file until properties of file have been specified to the Case class\")\n\n # Loop through each comma-separated item in the line, after first truncating the newline from the end\n for idx, item in enumerate(csv_line[0:len(csv_line) - 1].split(\",\")):\n if idx == Case.label_column:\n self.label = item # Save column tagged as label as a string\n else:\n try:\n self.attributes.append(float(item)) # Parse each column tagged as an attribute as a float\n except ValueError:\n logging.error(\"Cannot parse attribute \\\"%s\\\" into a floating-point number\" % item)\n raise ParseCsvError(item, csv_line)\n self.attributesAlreadyExamined.append(False)\n self.predicted = None", "def test_parseLine2(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"11/11/19,Brighter Futures,12000\"\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then: (Using PyTruth assertions)\n AssertThat(result).IsNone()", "def test_parseLine1(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"12Nov2019,Teacher,Brighter Futures,12000\"\n expectedResult = {\n 'date': '2019-11-12',\n 'job_title': 'Teacher',\n 'company_name': 'Brighter Futures',\n 'salary': 12000\n }\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then:\n assert result == expectedResult", "def parse_csv(line: str) -> str: \n\n try:\n\n Q_column_headers = ['trade_dt','arrival_tm','rec_type','symbol','event_tm', \\\n 'event_seq_nb','exchange','bid_pr','bid_size','ask_pr','ask_size']\n T_column_headers = ['trade_dt','arrival_tm','rec_type','symbol','event_tm', \\\n 'event_seq_nb','exchange','trade_pr']\n \n bid_pr = None\n bid_size = None\n ask_pr = None\n ask_size = None\n trade_pr = None\n\n record = line.split(\",\")\n if record[2] == 'Q':\n record_dict = dict(zip(Q_column_headers, record))\n bid_pr = Decimal(record_dict['bid_pr'])\n bid_size = int(record_dict['bid_size'])\n ask_pr = Decimal(record_dict['ask_pr'])\n ask_size = int(record_dict['ask_size'])\n else:\n record_dict = dict(zip(T_column_headers, record))\n trade_pr = Decimal(record_dict['trade_pr'])\n\n trade_dt = datetime.datetime.strptime(record_dict['trade_dt'], '%Y-%m-%d')\n rec_type = record_dict['rec_type']\n symbol = record_dict['symbol']\n exchange = record_dict['exchange']\n event_tm = datetime.datetime.strptime(record_dict['event_tm'], '%Y-%m-%d %H:%M:%S.%f')\n event_seq_nb = int(record_dict['event_seq_nb'])\n arrival_tm = datetime.datetime.strptime(record_dict['arrival_tm'], '%Y-%m-%d %H:%M:%S.%f')\n partition = rec_type\n\n return [trade_dt, rec_type, symbol, exchange, event_tm, event_seq_nb, arrival_tm, \\\n trade_pr, bid_pr, bid_size, ask_pr, ask_size, partition]\n \n except Exception as e:\n \n # If anything goes wrong, output empty record with \"B\" partition\n # empty_str = \",\" * (COMMON_EVENT_COLUMN_COUNT - 1)\n # return f\"{empty_str}B\".split(\",\")\n\n return [ None for i in range(COMMON_EVENT_COLUMN_COUNT - 1) ] + ['B']", "def parse(cls, line):\r\n raise NotImplementedError", "def from_csv_line(line):\r\n return line.strip().split(',')", "def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert", "def processLine(line):\n assert line[-1] == '\\n'\n line = line[:-1]\n (ticker, date, open_, high, low, close, volume, dividends, splits,\n adj_open, adj_high, adj_low, adj_close, adj_volume) = line.split(',')\n # Check date.\n try:\n 
tmp_date = datetime.datetime.strptime(date, '%Y-%m-%d')\n except ValueError:\n assert False, 'unsupported date format in line: %s' % line\n assert checkOHLC(open_, high, low, close), 'invalid OHLC in line: %s' % line\n assert checkOHLC(adj_open, adj_high, adj_low, adj_close), (\n 'invalid adj OHLC in line: %s' % line)\n assert checkVolume(volume), 'invalid volume in line: %s' % line\n if adj_volume != '':\n adj_volume = getNonNegFloat(adj_volume)\n assert adj_volume is not None, 'invalid adj volume in line: %s' % line\n if dividends != '':\n dividends = getNonNegFloat(dividends)\n assert dividends is not None, 'invalid dividends in line: %s' % line\n if splits != '':\n splits = getNonNegFloat(splits)\n assert splits is not None, 'invalid splits in line: %s' % line\n\n return ticker, date, line", "def line_to_row(line):\n m = line_re.match(line)\n if m:\n return Row(hostname=m.group(1), transferred=m.group(2))\n else:\n return None", "def line_parser(one_line):\n one_line_splitted = one_line.split('\\t')\n\n if not len(one_line_splitted) == 3:\n raise Exception('Not enough columns in data file')\n else:\n res = {\n \"epoch\": one_line_splitted[0],\n \"seconds\": one_line_splitted[1],\n \"description\": one_line_splitted[2].replace('#','').replace('\\n','').strip()\n }\n\n return res", "def parse(self, line):\n try:\n (year, month, day, hour, minute, second, microseconds, offset_hour, offset_minute, source, process, logentry) = re.match('^(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)T(\\d\\d):(\\d\\d):(\\d\\d)\\.([\\d]+)\\+(\\d\\d):(\\d\\d) ([a-z]+)\\[([a-zA-Z0-9_.]+)\\]: ([0-9a-z-A-Z\\-_\\.\\[\\]:\\?\\#\\\",/\\ ={}\\'\\(\\)<>]+)$', line).groups()\n except:\n pass\n \n try:\n parsed_data = dict()\n parsed_data['timestamp'] = \" \".join([\"-\".join([year, month, day]), \":\".join([hour, minute, second])])\n parsed_data['log_time'] = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n parsed_data['log_source'] = source\n parsed_data['log_type'] = process\n except (AttributeError, UnboundLocalError):\n PARSE_ERRORS.append(line)\n return False\n\n #TODO: This still needs work on spaces in values surrounded by \" \" \n if parsed_data['log_source'] == \"heroku\":\n if logentry.__len__() > 1:\n logentry = re.sub(', ', ',', logentry)\n line_chunks = re.split(' ', logentry)\n for chunk in line_chunks:\n line_chunks = re.split('=', chunk)\n if line_chunks.__len__() > 2:\n #fwd and path are a little clunky to parse\n pass\n elif line_chunks.__len__() > 1:\n parsed_data[line_chunks[0]] = line_chunks[1]\n else:\n pass\n else:\n return False\n else:\n # TODO: [app] \n # Needs parsing. 
Do that here.\n return False\n\n return parsed_data", "def from_cvs_to_obj(csv_line):\n arg_arr = csv_line.split(\",\")\n if len(arg_arr) != 8:\n print(\"Error : Few data are missing\", file=stderr)\n exit(-1)\n return Flight(arg_arr[0], arg_arr[1], arg_arr[2], arg_arr[3], arg_arr[4], arg_arr[5], arg_arr[6], arg_arr[7])", "def parse_line(line):\n pattern = re.compile(REGEX)\n result = pattern.match(line, re.I)\n if result != None:\n part = result.groups()\n return LogEntry(part[0], part[3], part[4], part[5], part[6])\n return None", "def parse_line(self, line):\n \n if self._multiline_key:\n self.while_multi_line(line)\n \n elif self.EXTENDS_LINE.match(line):\n self.extends_line_match(self.EXTENDS_LINE.match(line), line)\n \n elif self.FROM_FILE_LINE.match(line):\n self.from_file_line_match(self.FROM_FILE_LINE.match(line), line)\n \n elif self.ONE_LINE.match(line):\n self.one_line_match(self.ONE_LINE.match(line), line)\n \n elif self.SANDBOX_FILE_LINE.match(line):\n self.sandbox_file_line_match(self.SANDBOX_FILE_LINE.match(line), line)\n \n elif self.MULTI_LINE.match(line):\n self.multi_line_match(self.MULTI_LINE.match(line), line)\n \n elif self.COMMENT_LINE.match(line):\n self.dic['__comment'] += '\\n' + self.COMMENT_LINE.match(line).group('comment')\n \n elif not self.EMPTY_LINE.match(line):\n raise SyntaxErrorPL(self.path_parsed_file, line, self.lineno)", "def read_from_line(cls, line):\n raise RuntimeError(\"%s feature can't be \"\n \"read from a line!\", str(cls))", "def process_line():\n pass", "def _parse_csv(cls, filepath):\n cls._filename = basename(filepath)\n with codecs.open(filepath, mode='rb', encoding='ascii') as fp:\n # Determine type of EVE CSV file and parse\n line1 = fp.readline()\n fp.seek(0)\n\n if line1.startswith(\"Date\"):\n return cls._parse_average_csv(fp)\n elif line1.startswith(\";\"):\n return cls._parse_level_0cs(fp)", "def process_line(self, line):\n columns = line.split('|')\n\n if len(line) == 0 or len(columns) < 16:\n return None # empty line or malformed line\n\n cmte_id, name, zip_code = columns[0], columns[7], columns[10][:5]\n transaction_dt, transaction_amt = columns[13], columns[14]\n other_id = columns[15]\n\n if len(other_id) > 0 or len(transaction_amt) == 0 or len(cmte_id) == 0 or len(name) == 0 or len(zip_code) < 5:\n return None # malformed data fields, ignore this line\n transaction_date = string_to_date(transaction_dt)\n if transaction_date is None:\n return None # 'TRANSACTION_DT' is an invalid date\n\n try:\n if self.repeat_donor(name, zip_code, transaction_date.year):\n # this record is from a repeat donor in any prior calendar year\n amount = float(transaction_amt)\n key = RecipientZipYear(cmte_id, zip_code, transaction_date.year)\n if key not in self.running_percentile:\n self.running_percentile[key] = RunningPercentile(self.percentile)\n self.running_percentile[key].add(amount)\n return self.print_record(key)\n else:\n return None # this record is not from a repeat donor\n except:\n return None # exception may comes from malformed line, so just ignore this line", "def test_import_csv_file_a_row(self):\n\n complete_data = parse_csv_file(self.test_file_path)\n self.assertEqual(self.expected_data, complete_data[0])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return true if we should fastskip converting a pa.Array. The _true_ reason for this function is to test whether an Array contains "Inf" or "NaN". A number-conversion library will parse those. But _this_ library is for Workbench, and Workbench doesn't support NaN/Inf. So this function helps us decide _not_ to autoconvert a column when the intent isn't perfectly clear. Assume `arr` is of type `utf8` or a dictionary of `utf8`. Assume there are no gaps hidden in null values in the buffer. (It's up to the caller to prove this.)
def _utf8_chunk_may_contain_inf_or_nan(chunk: pyarrow.Array) -> bool:
    _, offsets_buf, data_buf = chunk.buffers()
    offsets = array.array("i")
    assert offsets.itemsize == 4
    offsets.frombytes(offsets_buf)
    if sys.byteorder != "little":
        offsets.byteswap()  # pyarrow is little-endian
    offset0 = offsets[chunk.offset]
    offsetN = offsets[chunk.offset + len(chunk)]  # len(offsets) == 1 + len(chunk)
    b = data_buf[offset0:offsetN].to_pybytes()
    return SCARY_BYTE_REGEX.search(b) is not None
[ "def asarray_chkfinite(a):\n a = asarray(a)\n if (a.dtype.char in typecodes['AllFloat']) \\\n and (_nx.isnan(a).any() or _nx.isinf(a).any()):\n raise ValueError, \"array must not contain infs or NaNs\"\n return a", "def contains_inf(arr, node=None, var=None):\n if not _is_numeric_value(arr, var):\n return False\n elif getattr(arr, \"dtype\", \"\") in discrete_dtypes:\n return False\n return np.isinf(np.nanmax(arr)) or np.isinf(np.nanmin(arr))", "def pyarrow_array(arr, nan_to_null=False):\n import numpy as np\n import pyarrow as pa\n if nan_to_null and issubclass(arr.dtype.type,\n (np.floating, np.complexfloating)):\n isnan = np.isnan(arr)\n if isnan.any():\n pa_nul = pa.py_buffer(get_bitmap(isnan))\n return pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [pa_nul, pa.py_buffer(arr)])\n return pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [None, pa.py_buffer(arr)])", "def is_nonnegative_arr(arr):\n out = True\n if not np.all(arr > -1e-6):\n print('some negative entries in ' + str(arr))\n out = False\n return out", "def __where_not_nan(arr: np.ndarray):\n return np.where(np.isfinite(arr))", "def is_float_array(val):\n return is_np_array(val) and issubclass(val.dtype.type, np.floating)", "def nonans(array):\n return array[~np.isnan(array)]", "def _isnan_check(array):\n return np.isnan(array).any()", "def _is_double(arr):\n\n # Figure out which dtype for data\n if arr.dtype == np.float32:\n return False\n elif arr.dtype == np.float64:\n return True\n else:\n raise ValueError(\"Only float32 or float64 dtypes are supported\")", "def _is_strictly_increasing(array: np.ndarray) -> bool:\n return (np.diff(array) > 0).all().astype(bool)", "def isfillvalue(a):\n a = numpy.asarray(a)\n if a.dtype.kind == 'i':\n mask = a == -999999999\n elif a.dtype.kind == 'f':\n mask = numpy.isnan(a)\n elif a.dtype.kind == 'S':\n mask = a == ''\n else:\n raise ValueError('Fill value not known for dtype %s' % a.dtype)\n return mask", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def is_float(arr, *args):\n return arr.dtype is np.dtype(np.float)", "def is_positive_arr(arr):\n out = True\n if not np.all(arr > 0):\n print('some negative neg or zero entries in ' + str(arr))\n out = False\n return out", "def _has_array(self) -> Optional[bool]:\n\n if self.__has_array is not None:\n return self.__has_array\n\n # False -ves (array length is 1) are an acceptable compromise to extensive checking\n\n # W --- 01:145038 34:092243 --:------ 1FC9 006 07230906368E\n # I --- 01:145038 --:------ 01:145038 1FC9 018 07000806368E-FC3B0006368E-071FC906368E\n # I --- 01:145038 --:------ 01:145038 1FC9 018 FA000806368E-FC3B0006368E-FA1FC906368E\n # I --- 34:092243 --:------ 34:092243 1FC9 030 0030C9896853-002309896853-001060896853-0010E0896853-001FC9896853\n if self.code == _1FC9:\n self.__has_array = self.verb != RQ # safe to treat all as array, even len=1\n\n elif self.verb != I_ or self.code not in CODES_WITH_ARRAYS:\n self.__has_array = False\n\n elif self.len == CODES_WITH_ARRAYS[self.code][0]: # NOTE: can be false -ves\n self.__has_array = False\n\n else:\n _len = CODES_WITH_ARRAYS[self.code][0]\n assert (\n self.len % _len == 0\n ), f\"{self} << array has length ({self.len}) that is not multiple of {_len}\"\n assert (\n self.src.type in (\"12\", \"22\") or self.src == self.dst\n ), f\"{self} << array is from a non-controller (01)\"\n assert (\n self.src.type not in (\"12\", \"22\") or self.dst == 
NON_DEV_ADDR\n ), f\"{self} << array is from a non-controller (02)\"\n self.__has_array = True\n\n # I --- 10:040239 01:223036 --:------ 0009 003 000000 # not array\n # I --- 01:102458 --:------ 01:102458 0009 006 FC01FF-F901FF\n # I --- 01:145038 --:------ 01:145038 0009 006 FC00FF-F900FF\n # I 034 --:------ --:------ 12:126457 2309 006 017EFF-027EFF\n # I --- 01:223036 --:------ 01:223036 000A 012 081001F40DAC-091001F40DAC # 2nd fragment\n # I 024 --:------ --:------ 12:126457 000A 012 010001F40BB8-020001F40BB8\n # I --- 02:044328 --:------ 02:044328 22C9 018 0001F40A2801-0101F40A2801-0201F40A2801\n # I --- 23:100224 --:------ 23:100224 2249 007 007EFF7EFFFFFF # can have 2 zones\n # I --- 02:044328 --:------ 02:044328 22C9 018 0001F40A2801-0101F40A2801-0201F40A2801\n # I --- 02:001107 --:------ 02:001107 3150 010 007A-017A-027A-036A-046A\n\n return self.__has_array", "def spikes(arr):\n arr = np.array(arr)\n if (arr.size == 0) or flat(arr) or monotonic(arr):\n return False\n arr = normalize(arr)\n spikes = np.where(arr > arr.mean())[0]\n rest = np.ones_like(arr, dtype=bool)\n rest[spikes] = False\n return flat(arr[rest]) and flat(np.diff(arr[spikes]))", "def __isZeroEverywhere(self, array):\n epsilon = numpy.finfo( type(array[0]) ).eps\n boolList = numpy.less_equal(numpy.abs(array), epsilon)\n\n for b in boolList:\n if not b:\n return False\n return True", "def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)", "def test_dtype_None(self):\n array = np.array([[0, 1, 2], [2, 1, 0]]).T\n self.assertTrue(to_ndarray(array, None, safe=True).flags.contiguous,\n msg='to_ndarray: Non contiguous arrays are not being consolidated when dtype is None')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the config information with new dropout values.
def update_dropout(info, dropout, dropout_type, prop_name):
    if dropout_type == "schnet_dropout":
        info["model_params"]["schnet_dropout"] = dropout
    elif dropout_type == "chemprop_dropout":
        info["model_params"]["cp_dropout"] = dropout
    elif dropout_type == "readout_dropout":
        # if it's in the readout layers, find the dropout
        # layers in the readout dictionary and update them
        readout = info["model_params"]["readoutdict"]
        layer_dics = readout[prop_name]
        for layer_dic in layer_dics:
            if layer_dic["name"] == "Dropout":
                layer_dic["param"]["p"] = dropout
        info["model_params"]["readoutdict"] = {prop_name: layer_dics}
    elif dropout_type == "attention_dropout":
        info["model_params"]["boltzmann_dict"]["dropout_rate"] = dropout
    else:
        info["model_params"][dropout_type] = dropout
[ "def conf_update(self):\n pass", "def update(self):\n self.save_config_file()", "def update_config(self, config):\n self.config = config\n self.rate_dropout = nn.Dropout(config.DROPOUT_RATES)\n self.pos_encoder.update_config(config)\n self.transformer_encoder.update_config(config)\n self.src_mask = {} # Clear cache", "async def _update_config(self):\n if self.config['data'] is None or self.config_expired:\n data = await self.get_data(self.url_builder('configuration'))\n self.config = dict(data=data, last_update=datetime.now())", "def _updateConfigWidgets(self):\n self._ui.configModelCoordinates_fieldChooser.setField(self._fitter.getModelCoordinatesField())\n self._ui.configDataCoordinates_fieldChooser.setField(self._fitter.getDataCoordinatesField())\n self._ui.configMarkerGroup_fieldChooser.setField(self._fitter.getMarkerGroup())", "def changeDropout(self,dropout):\n self.dropout = dropout", "def update_values(self, config, dest):\n for section in config.keys():\n if section in dest:\n for option in config[section].keys():\n if option in (\"desc\", \"outline\"):\n continue\n\n if option in dest[section]:\n dest[section][option][\"value\"] = config[section][option][\n \"value\"\n ]\n\n # else:\n # dest[section][option] = config[section][option]\n\n # else:\n # dest[section] = config[section]", "def update_global_config(self, config, **kwargs):\n pass", "def update_config(self, data):\n self.config.data = dict_merge(self.config.data, data)\n self.config.save()", "def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def OnConfigChange(self, input):\n return", "def image_config_update(self, new_variants, new_facets, new_mediators):\n\n if new_variants is not None:\n self.cfg.variants.update(new_variants)\n if new_facets is not None:\n self.cfg.facets = new_facets\n if new_mediators is not None:\n self.cfg.mediators = new_mediators\n self.save_config()", "def update_sensor_options(val):\n sensor_drop.options = sensor_dict[spacecraft_drop.value]", "def update_data(self):\n self.global_config.update()\n self.gl_opts_grid.foreach(self.gl_opts_grid.remove)\n self.build_rhvoice_conf_page(self.gl_opts_grid)", "def update(self, other):\n self._config.update(other)", "def update_config():\n config.update_config(config.usr_config_file, config.def_config_file)", "def update_config(self, config):\n # Save a copy of the current config.\n newconfig = deepcopy(self.config)\n # Merge the new config into the current one.\n newconfig.merge(config)\n # Save the combined config as self.config, which triggers the traits\n # events.\n self.config = newconfig", "def update_ranges(self):\n new_ranges = self.get_z_ranges()\n self.config.update_ranges(new_ranges)", "def update_config(self):\n dwam_params = {\n 'genesis_color_sets': self.genesis_color_sets,\n 'color_set_states': self.color_set_states\n }\n self.config['dwam'] = dwam_params" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the config information with the number of attention heads.
def update_heads(info, heads):
    info["model_params"]["boltzmann_dict"]["num_heads"] = heads
    # Concatenate the fingerprints produced by the different heads
    info["model_params"]["boltzmann_dict"]["head_pool"] = "concatenate"
    readoutdict = info["model_params"]["readoutdict"]
    feat_dim = info["model_params"]["mol_basis"]
    for key, lst in readoutdict.items():
        for i, dic in enumerate(lst):
            if "param" in dic and "in_features" in dic.get("param", {}):
                # make sure that the input dimension to the readout is equal to
                # `heads * feat_dim`, where `feat_dim` is the feature dimension
                # produced by each head
                readoutdict[key][i]["param"]["in_features"] = feat_dim * heads
                break
    info["model_params"]["readoutdict"] = readoutdict
[ "def increment_config_version(self):\n self.config_version += 1\n if self.config_version > MAX_CONFIG_VERSION:\n self.config_version = 1", "def _make_attention(self):\n return self.config.attention_cls(\n num_heads=self.config.num_heads,\n dtype=self.config.dtype,\n qkv_features=self.config.qkv_dim,\n head_dim=self.config.head_dim,\n kernel_init=self.config.attention_kernel_init,\n bias_init=self.config.bias_init,\n use_bias=False,\n broadcast_dropout=True,\n rescale_logits=self.config.rescale_logits,\n dropout_rate=self.config.attention_dropout_rate,\n use_extra_logit=self.config.use_extra_logit,\n float32_logits=self.config.attention_float32_logits)", "def conf_update(self):\n pass", "def test_attention_net(self):\n\n # Checks that torch and tf embedding matrices are the same\n with tf1.Session().as_default() as sess:\n assert np.allclose(\n relative_position_embedding(20, 15).eval(session=sess),\n relative_position_embedding_torch(20, 15).numpy())\n\n # B is batch size\n B = 32\n # D_in is attention dim, L is memory_tau\n L, D_in, D_out = 2, 16, 2\n\n for fw, sess in framework_iterator(session=True):\n\n # Create a single attention layer with 2 heads\n if fw == \"torch\":\n # Create random Tensors to hold inputs and outputs\n x = torch.randn(B, L, D_in)\n y = torch.randn(B, L, D_out)\n\n value_labels = torch.randn(B, L, D_in)\n memory_labels = torch.randn(B, L, D_out)\n\n attention_net = TorchGTrXLNet(\n observation_space=gym.spaces.Box(\n low=float(\"-inf\"), high=float(\"inf\"), shape=(D_in, )),\n action_space=gym.spaces.Discrete(D_out),\n num_outputs=D_out,\n model_config={\"max_seq_len\": 2},\n name=\"TestTorchAttentionNet\",\n num_transformer_units=2,\n attn_dim=D_in,\n num_heads=2,\n memory_tau=L,\n head_dim=D_out,\n ff_hidden_dim=16,\n init_gate_bias=2.0)\n\n init_state = attention_net.get_initial_state()\n\n # Get initial state and add a batch dimension.\n init_state = [np.expand_dims(s, 0) for s in init_state]\n seq_lens_init = torch.full(\n size=(B, ), fill_value=L, dtype=torch.int32)\n\n # Torch implementation expects a formatted input_dict instead\n # of a numpy array as input.\n input_dict = {\"obs\": x}\n self.train_torch_full_model(\n attention_net,\n input_dict, [y, value_labels, memory_labels],\n num_epochs=250,\n state=init_state,\n seq_lens=seq_lens_init)\n # Framework is tensorflow or tensorflow-eager.\n else:\n x = np.random.random((B, L, D_in))\n y = np.random.random((B, L, D_out))\n\n value_labels = np.random.random((B, L, 1))\n memory_labels = np.random.random((B, L, D_in))\n\n # We need to create (N-1) MLP labels for N transformer units\n mlp_labels = np.random.random((B, L, D_in))\n\n attention_net = GTrXLNet(\n observation_space=gym.spaces.Box(\n low=float(\"-inf\"), high=float(\"inf\"), shape=(D_in, )),\n action_space=gym.spaces.Discrete(D_out),\n num_outputs=D_out,\n model_config={\"max_seq_len\": 2},\n name=\"TestTFAttentionNet\",\n num_transformer_units=2,\n attn_dim=D_in,\n num_heads=2,\n memory_tau=L,\n head_dim=D_out,\n ff_hidden_dim=16,\n init_gate_bias=2.0)\n model = attention_net.trxl_model\n\n # Get initial state and add a batch dimension.\n init_state = attention_net.get_initial_state()\n init_state = [np.tile(s, (B, 1, 1)) for s in init_state]\n\n self.train_tf_model(\n model, [x] + init_state,\n [y, value_labels, memory_labels, mlp_labels],\n num_epochs=200,\n minibatch_size=B)", "def update_config(self, config):\n self.config = config\n self.rate_dropout = nn.Dropout(config.DROPOUT_RATES)\n self.pos_encoder.update_config(config)\n 
self.transformer_encoder.update_config(config)\n self.src_mask = {} # Clear cache", "def update(self, config):\n self.n_topics = config['n_topics'] \n self.n_passes = config['n_passes'] \n self.min_docfreq = config['min_docfreq'] \n self.max_docfreq = config['max_docfreq']\n self.ngrams = config['ngrams'] \n self.n_words = config['n_words'] \n self.topic_range = config['topic_range'] \n self.ext_stop_words = config['ext_stop_words']", "def update(self, rxn_probs):\n pass", "def config_changed(self):\n self.config_version += 1\n self.driver.config_changed()", "def test_multi_head_attention(self):\n # B is batch size\n B = 1\n # D_in is attention dim, L is memory_tau\n L, D_in, D_out = 2, 32, 10\n\n for fw, sess in framework_iterator(\n frameworks=(\"tfe\", \"torch\", \"tf\"), session=True):\n # Create a single attention layer with 2 heads.\n if fw == \"torch\":\n\n # Create random Tensors to hold inputs and outputs\n x = torch.randn(B, L, D_in)\n y = torch.randn(B, L, D_out)\n\n model = TorchMultiHeadAttention(\n in_dim=D_in, out_dim=D_out, num_heads=2, head_dim=32)\n\n self.train_torch_layer(model, x, y, num_epochs=500)\n\n # Framework is tensorflow or tensorflow-eager.\n else:\n x = np.random.random((B, L, D_in))\n y = np.random.random((B, L, D_out))\n\n inputs = tf.keras.layers.Input(shape=(L, D_in))\n\n model = tf.keras.Sequential([\n inputs,\n MultiHeadAttention(\n out_dim=D_out, num_heads=2, head_dim=32)\n ])\n self.train_tf_model(model, x, y)", "def vqa_attention_base():\n hparams = common_hparams.basic_params1()\n hparams.batch_size = 2\n hparams.use_fixed_batch_size = True,\n hparams.optimizer = \"Adam\"\n hparams.optimizer_adam_beta1 = 0.9\n hparams.optimizer_adam_beta2 = 0.999\n hparams.optimizer_adam_epsilon = 1e-8\n hparams.weight_decay = 0\n hparams.clip_grad_norm = 0.\n hparams.initializer = \"uniform_unit_scaling\"\n hparams.initializer_gain = 2.\n hparams.learning_rate = 0.5\n hparams.learning_rate_schedule = \"legacy\"\n hparams.learning_rate_warmup_steps = 0\n hparams.learning_rate_decay_scheme = \"exp\"\n hparams.learning_rate_decay_rate = 0.5\n hparams.learning_rate_decay_steps = 50000\n\n # not used hparams\n hparams.label_smoothing = 0.\n hparams.multiply_embedding_mode = \"\"\n\n hparams.dropout = 0.5\n hparams.norm_type = \"layer\"\n hparams.layer_postprocess_sequence = \"nd\"\n hparams.layer_prepostprocess_dropout = 0.5\n\n # add new hparams\n # preprocess\n hparams.add_hparam(\"resize_side\", 512)\n hparams.add_hparam(\"height\", 448)\n hparams.add_hparam(\"width\", 448)\n hparams.add_hparam(\"distort\", True)\n\n hparams.add_hparam(\"train_resnet\", False)\n hparams.add_hparam(\"rnn_type\", \"lstm\")\n hparams.add_hparam(\"num_rnn_layers\", 1)\n hparams.add_hparam(\"max_question_length\", 15)\n # lstm hidden size\n hparams.hidden_size = 512\n\n hparams.add_hparam(\"attn_dim\", 512)\n hparams.add_hparam(\"num_glimps\", 2)\n\n hparams.add_hparam(\"num_mlp_layers\", 1)\n hparams.add_hparam(\"mlp_dim\", 1024)\n\n return hparams", "def _InitAttentionParams(self, atten_tpl):\n p = self.params\n\n if isinstance(p.num_heads, list) != isinstance(atten_tpl, list):\n raise ValueError('p.num_heads and p.atten_tpl should both be lists '\n f'or both scalars for {p.name} num_heads={p.num_heads}.')\n if isinstance(p.num_heads, list) and (len(p.num_heads) != len(atten_tpl)):\n raise ValueError('num_heads and atten_tpl should both be lists '\n 'of the equal sizes: '\n f'{len(p.num_heads)} vs {len(atten_tpl)}')\n\n def _SetCommonParams(params, name, num_heads):\n # Raise warning if 
self.params override params from atten_tpl\n for key in ['input_dim', 'hidden_dim', 'num_heads', 'atten_dropout_prob']:\n if params.Get(key) is not p.Get(key):\n tf.logging.warning('attention param {} overriding: {} -> {}'.format(\n key, params.Get(key), p.Get(key)))\n if params.name is not name:\n tf.logging.warning('attention param name overriding: {} -> {}'.format(\n params.name, name))\n params.name = name\n params.input_dim = p.input_dim\n params.hidden_dim = p.hidden_dim\n params.num_heads = num_heads\n params.atten_dropout_prob = p.atten_dropout_prob\n if isinstance(p.num_heads, list):\n params.proj_tpl.make_output_proj_no_op = True\n # Each dim per head is now divided among all heads\n dim_per_head = p.hidden_dim // sum(p.num_heads)\n params.proj_tpl.dim_per_head = dim_per_head\n params.dim_per_head = dim_per_head\n params.hidden_dim = p.hidden_dim // len(p.num_heads)\n return params\n\n if isinstance(p.num_heads, list):\n params_list = []\n for i in range(len(atten_tpl)):\n params = atten_tpl[i].Copy()\n params = _SetCommonParams(params, 'mixed_atten_{}'.format(i),\n p.num_heads[i])\n params_list.append(params)\n params = params_list\n else:\n params = atten_tpl.Copy()\n params = _SetCommonParams(params, 'multihead_atten', p.num_heads)\n return params", "def update_config(self, config):\n # Save a copy of the current config.\n newconfig = deepcopy(self.config)\n # Merge the new config into the current one.\n newconfig.merge(config)\n # Save the combined config as self.config, which triggers the traits\n # events.\n self.config = newconfig", "def hw_config(self, n):\n self._program += super().hw_config(n)\n self._check_pc()\n return", "def configure(self, game_config):\n self.num_players = game_config['game_num_players']", "def update_counts(self) -> None:\n ...", "def _on_config_msg(self, msg):\n self._config_updates.add(msg)", "def n_configs(self):\n return self._faux._n_configs", "def update_config(\n self,\n **spacy_training_config,\n ) -> None:\n self.config[\"training\"].update(spacy_training_config)", "def updateSizeHead(self, size): \n self.avatarConfiguration[\"headSize\"] = size\n self.paintHead()\n self.paintHair()\n if (self.avatarConfiguration[\"mask\"]):\n self.generateMask(\"imgUpload.png\")\n self.paintMask()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update a general parameter that's in the main info dictionary.
def update_general(info, key, val):
    info["model_params"][key] = val
[ "def update_parameter(self, param, val, force=False):\n self._update_dict[param] = val\n if force:\n self._cur_val[param] = None", "def update_params(self):", "def updateParam(self, name, value):\n params = self.params\n params[name]['value'] = value\n self.params = params", "def update_parameter(self, name, freq, value):\n if name not in self._parameters.keys():\n self.add_parameter(name, [freq], [value])\n else:\n param = self.get_parameter(name)\n param.update_value(freq, value)", "def update_param(self, update_param):\n\n self._update_param = update_param", "def update_algo_parameter(self, parameter_name, new_parameter_value):\n self.algo.update_algo_parameter(parameter_name, new_parameter_value)", "def update_param_info(param_info, config, is_user_config=False):\n if 'parameters' not in config:\n return\n params = config['parameters']\n for name in params:\n val = params[name]\n if not is_user_config:\n # If this is not a user-provided configuration, we disallow parameter redefinition.\n if name in param_info:\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter redefinition is not allowed for non-user configuration.\"\n \" This is a system configuration error that must not happen.\"\n \" Parameter %s=%s, new parameter definition (value) is %s\" % (name, str(param_info[name]), val)\n )\n if isinstance(val, dict):\n # This is a complete parameter definition with name, value and description.\n if 'val' not in val:\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter that is defined by a dictionary must contain 'val' field that\"\n \" defines its default value. Found this definition: %s=%s\" % (name, val)\n )\n if name not in param_info:\n param_info[name] = copy.deepcopy(val) # New parameter, set it info object.\n # TODO what about parameter type and description?\n else:\n logging.warn(\n \" Parameter (%s) entirely redefines existing parameter (%s).\"\n \" Normally, only value needs to be provided.\"\n \" We will proceed but you may want to fix this.\",\n json.dumps(val),\n json.dumps(param_info[name])\n )\n param_info[name]['val'] = val['val'] # Existing parameter from user configuration, update its value\n else:\n # Just parameter value\n val_type = 'str' if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__\n if name not in param_info:\n param_info[name] = {\n 'val': val,\n 'type': val_type,\n 'desc': \"No description for this parameter provided (it was automatically converted from its value).\"\n }\n else:\n param_info[name]['val'] = val\n # Do final validations\n if 'type' in param_info[name] and param_info[name]['type'] not in ('int', 'str', 'float', 'bool'):\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter has invalid type = '%s'.\"\n \" Parameter definition is %s = %s\" % (param_info[name]['type'], name, param_info[name])\n )\n if 'type' not in param_info[name] or 'desc' not in param_info[name]:\n logging.warn(\n \"Parameter definition does not contain type ('type') and/or description ('desc').\"\n \" You should fix this. 
Parameter definition is\"\n \" %s = %s\", name, param_info[name]\n )", "def UpdateParameters(self, param):\n\n for i, attribute in enumerate(self._fit_key.keys()):\n if attribute in param.keys():\n # Set attribute according to if it is a range or not\n if ';' in self._fit_key[attribute]:\n varmin = float(min(self._fit_key[attribute].split(';')))\n varmax = float(max(self._fit_key[attribute].split(';')))\n var = ROOT.RooRealVar(\n attribute,\n attribute,\n varmin,\n varmax)\n param[attribute] = var\n else:\n param[attribute] = float(self._fit_key[attribute])\n\n info(\n 'Change default value of {} (= {}) for signal PDF'\n .format(attribute, self._fit_key[attribute]))", "def update_params(self, d):\n for k, v in d.items():\n if k in self[\"parameters\"]:\n self[\"parameters\"][k].update(v)", "def updateParameters(self, parameters):\r\n super(Tool, self).updateParameters(parameters)", "def _update_params(self):\n pass", "def updateParamAnypoint(self):\n self.parent.ui.lineEdit_beta.setText(\"%.2f\" % self.parent.beta)\n self.parent.ui.lineEdit_alpha.setText(\"%.2f\" % self.parent.alpha)\n self.parent.ui.spinBox_zoom.setValue(self.parent.zoom)", "def _update_parameter_map(self):\n\n self._position_to_param = []\n self._mw_kwargs = {}\n for p in self._mw_fit_parameters.keys():\n if self._mw_fit_parameters[p].fixed:\n self._mw_kwargs[p] = self._mw_fit_parameters[p].value\n else:\n self._mw_kwargs[p] = None\n self._position_to_param.append(p)\n\n self._mw_kwargs.update(self._mw_other_arguments)", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def update_values(self, to_update):\n for key, value in kwargs.iteritems():\n self.params[key] = value\n # update the possibly dependent parameters\n self.set_filenames()", "def edit_parameter(request, parameter, **_kwargs):\n pass", "def update_params(self):\r\n parameters = dict()\r\n # Take the first value for all parameters\r\n for key, value in self.total_params.items():\r\n parameters[key] = value[0]\r\n # Update model\r\n self.model = self.inst(random_state=RANDOM_SEED, **parameters)", "def post_parameter_update(self) -> None:", "def update_parameter(self, name: str, value: tp.Any) -> None:\n\n assert self._scope_params is not None\n\n if name not in self._scope_params:\n raise ValueError(f\"types.Parameter {name} not found in {self}.\")\n\n if self.is_initializing():\n return\n\n parameter = self._scope_params[name]\n assert isinstance(parameter, types.Parameter)\n parameter.value = value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct generalized extreme value distribution. The parameters `loc`, `scale`, and `concentration` must be shaped in a way that supports broadcasting (e.g. `loc + scale + concentration` is valid).
def __init__(self,
             loc,
             scale,
             concentration,
             validate_args=False,
             allow_nan_stats=True,
             name='GeneralizedExtremeValue'):
    parameters = dict(locals())
    with tf.name_scope(name) as name:
        dtype = dtype_util.common_dtype([loc, scale, concentration],
                                        dtype_hint=tf.float32)
        loc = tensor_util.convert_nonref_to_tensor(
            loc, name='loc', dtype=dtype)
        scale = tensor_util.convert_nonref_to_tensor(
            scale, name='scale', dtype=dtype)
        concentration = tensor_util.convert_nonref_to_tensor(
            concentration, name='concentration', dtype=dtype)
        dtype_util.assert_same_float_dtype([loc, scale, concentration])
        # Positive scale is asserted by the incorporated GEV bijector.
        self._gev_bijector = gev_cdf_bijector.GeneralizedExtremeValueCDF(
            loc=loc,
            scale=scale,
            concentration=concentration,
            validate_args=validate_args)
        # Because the uniform sampler generates samples in `[0, 1)` this would
        # cause samples to lie in `(inf, -inf]` instead of `(inf, -inf)`. To fix
        # this, we use `np.finfo(dtype_util.as_numpy_dtype(self.dtype).tiny`
        # because it is the smallest, positive, 'normal' number.
        super(GeneralizedExtremeValue, self).__init__(
            distribution=uniform.Uniform(
                low=np.finfo(dtype_util.as_numpy_dtype(dtype)).tiny,
                high=tf.ones([], dtype=dtype),
                allow_nan_stats=allow_nan_stats),
            # The GEV bijector encodes the CDF function as the forward,
            # and hence needs to be inverted.
            bijector=invert_bijector.Invert(
                self._gev_bijector, validate_args=validate_args),
            parameters=parameters,
            name=name)
[ "def gaussian(mu, wid, x):\n return np.exp(-((x - mu) / (0.6005612 * wid))**2)", "def func_full_exp(x, c1, c2, c3, c4, c5, c6, c7):\n x = np.power(10, x)\n thermalCore = c1 * np.sqrt(x) * np.exp(-c2 * x)\n a = map(lambda y: 0 if y < c5 else 1, x)\n b = map(lambda y: 0 if y < c6 else 1, x)\n #b1 = map(lambda y: 1 - y, b)\n a = np.array(a)\n b = np.array(b)\n b1 = 1.0 - b\n powerLaw = c3 * a * np.power(x, -c4) * (b1 + b * np.exp(-c7 * (x - c6)))\n #print thermalCore + powerLaw\n return np.log10(thermalCore + powerLaw)", "def gaussian(x,amp,cen,wid):\n \n z = (x-cen)/wid \n return amp*np.exp(-z**2/2.)", "def _gaussian(self, c, sigma):\n d = 2*pi*sigma*sigma\n ax = exp(-power(self._neigx-c[0], 2)/d)\n ay = exp(-power(self._neigy-c[1], 2)/d)\n return outer(ax, ay) # the external product gives a matrix", "def _gaussian(self, c, sigma):\n d = 2*sigma*sigma\n ax = exp(-power(self._xx-self._xx.T[c], 2)/d)\n ay = exp(-power(self._yy-self._yy.T[c], 2)/d)\n return (ax * ay).T # the external product gives a matrix", "def make_supercell(coords, lattice, size, min_size=-5) -> np.ndarray:\n a, b, c = lattice\n\n xyz_periodic_copies = []\n xyz_periodic_copies.append(coords)\n min_range = -3 # we aren't going in the minimum direction too much, so can make this small\n max_range = 20 # make this large enough, but can modify if wanting an even larger cell\n\n for x in range(-min_range, max_range):\n for y in range(0, max_range):\n for z in range(0, max_range):\n if x == y == z == 0:\n continue\n add_vector = x*a + y*b + z*c\n xyz_periodic_copies.append(coords + add_vector)\n\n # Combine into one array\n xyz_periodic_total = np.vstack(xyz_periodic_copies)\n\n # Filter out all atoms outside of the cubic box\n new_cell = xyz_periodic_total[np.max(xyz_periodic_total[:,:3], axis=1) < size]\n new_cell = new_cell[np.min(new_cell[:,:3], axis=1) > min_size]\n\n return new_cell", "def do_extreme(self,magnitude='magnitude',tp_optional='tp_optional',direction_optional='direction_optional',tm_optional='tm_optional',water_depth_optional='water_depth_optional',\\\n args={'Fitting distribution':{'Weibull':True,'Gumbel':False,'GPD':False,'GEV':False},\n 'Method':{'pkd':False,'pwm':False,'mom':False,'ml':True},\n 'Slope fitting distribution':{'Weibull':True,'Gumbel':False},\n 'Slope treshold':0.005,\n 'Return period':[1,10,25,50,100],\n 'Estimate Hmax & Cmax RPVs':{'On':False,'Off':True},\n 'threshold type':{'percentile':True,'value':False},\n 'threshold value':95.0,\n 'Directional':{'On':True,'Off':False},\n 'Minimum number of peaks over threshold': 30,\n 'Minimum time interval between peaks (h)':24.0,\n 'Direction binning':{'centered':True,'not-centered':False},\n 'Direction interval': 45.,\n 'Time blocking':{'Annual':True,'Seasonal (South hemisphere)':False,'Seasonal (North hemisphere)':False,'Monthly':False},\n 'Display peaks':{'On':True,'Off':False},\n 'Display CDFs':{'On':True,'Off':False},\n 'Water depth':5000.0,\n 'folder out':os.getcwd()\n }):\n\n display_message()", "def getEllipsoidGaussian(x, y, z, c, v1, sigma1, v2, sigma2, v3, sigma3):\n c = np.array(c, dtype=float).flatten()\n v1 = np.array(v1, dtype=float).flatten()\n v1 /= np.sqrt(np.sum(v1**2))\n v2 = np.array(v2, dtype=float).flatten()\n v2 /= np.sqrt(np.sum(v2**2))\n v3 = np.array(v3, dtype=float).flatten()\n v3 /= np.sqrt(np.sum(v3**2))\n X = np.array([x, y, z]).T\n print(X.shape)\n X -= c[None, :]\n d1 = (X.dot(v1))**2/(sigma1**2)\n d2 = (X.dot(v2))**2/(sigma2**2)\n d3 = (X.dot(v3))**2/(sigma3**2)\n return np.exp(-d1)*np.exp(-d2)*np.exp(-d3)", "def 
eq_xscale(trng_dset, trgt_dset):\n pdb.set_trace()\n temp1 = add_x(trng_dset, start=1)\n temp2 = add_x(trgt_dset, start=1)\n a = temp1.shape[0]\n b = temp2.shape[0]\n lcm = a * b / fractions.gcd(a, b)\n x = lcm / a\n y = lcm / b\n temp1[:, 0] = temp1[:, 0].dot(x)\n temp2[:, 0] = temp2[:, 0].dot(y)\n output = interpolate(temp1), interpolate(temp2)\n return np.asarray(output)", "def InelasticThreshold(Ecut, Ethreshold, xsAtEcut):\n print(Ecut, Ethreshold, xsAtEcut)\n __the_const = xsAtEcut / math.sqrt(Ecut - Ethreshold)\n\n def __sqrtThingee(E, *args):\n if E < Ethreshold:\n return 0.0\n else:\n return __the_const * math.sqrt(E - Ethreshold)\n __this_grid = copy.copy(Egrid) + [Ecut, Ethreshold]\n __this_grid.sort()\n return XYs1d.XYs1d.createFromFunction(\n defaultAxes(xName='E', xUnit='eV', yName='Sigma(E)', yUnit='b'),\n Xs=__this_grid,\n func=__sqrtThingee,\n parameters=[],\n accuracy=accuracy,\n biSectionMax=20,\n checkForRoots=False,\n infill=1,\n safeDivide=1)", "def min(self, e, extra_constraints=(), signed=False, exact=None):\n raise NotImplementedError()", "def min_max_scale(X, full_data_range, scale_range):\n\n # Set range vars for proper scaling among all protein coordinate data\n full_data_min, full_data_max = full_data_range\n scale_min, scale_max = scale_range\n\n # Scale coordinate data based on scaling variables defined\n X_std = (X - full_data_min) / (full_data_max - full_data_min)\n X_scaled = X_std * (scale_max - scale_min) + scale_min\n\n return X_scaled", "def __new__(cls, minx, miny, minz, maxx, maxy, maxz):\n # Coerce bounds to floats, and nones to infs\n kwargs = locals()\n for b, inf in zip(('min', 'max'),\n (-np.inf, np.inf)):\n for axis in 'xyz':\n bound = b + axis\n value = kwargs[bound]\n kwargs[bound] = inf if value is None else float(value)\n \n kwargs.pop('cls') # must be passed positionally\n return super(cls, cls).__new__(cls, **kwargs)", "def bigaussian(mu, wid, x, m = 0.5):\n lx = x.shape[0]\n ix = np.where(x == mu)[0][0]\n \n y = np.ones(lx)\n y[0:ix] = gaussian(mu, wid * m, x[0:ix])\n y[ix+1:lx] = gaussian(mu, wid * (1 - m), x[ix+1:lx]) \n \n return y", "def __init__(self,\n loc,\n concentration,\n validate_args=False,\n allow_nan_stats=True,\n name=\"InverseGaussian\"):\n parameters = dict(locals())\n with tf.name_scope(name, values=[loc, concentration]):\n self._loc = tf.convert_to_tensor(loc, name=\"loc\")\n self._concentration = tf.convert_to_tensor(concentration,\n name=\"concentration\")\n with tf.control_dependencies([\n tf.assert_positive(self._loc),\n tf.assert_positive(self._concentration)] if validate_args else []):\n self._loc = tf.identity(self._loc, name=\"loc\")\n self._concentration = tf.identity(self._concentration,\n name=\"concentration\")\n tf.assert_same_float_dtype([self._loc, self._concentration])\n super(InverseGaussian, self).__init__(\n dtype=self._loc.dtype,\n reparameterization_type=tf.distributions.NOT_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n graph_parents=[self._loc, self._concentration],\n name=name)", "def ecoMinMaxConsts(model, gen_dict, prod_vars, on_vars, reserve_vars):\n #No on_vars for nonstandard generation.\n for name, iHr in on_vars.keys():\n reserve = grb.quicksum(reserve_vars[name, iHr, type] \n for type in generator.GenUnit.RESERVE_PRODUCTS)\n if gen_dict[name].eco_max[iHr] < TOL:\n model.addConstr(on_vars[name, iHr] == 0 )\n model.addConstr(prod_vars[name, iHr] == 0 )\n model.addConstr(reserve == 0)\n continue\n model.addConstr( on_vars[name, 
iHr] * gen_dict[name].eco_min[iHr] <=\n prod_vars[name, iHr] + reserve, \n name=\"EcoMin\" + name + \"H\" + str(iHr) ) \n model.addConstr( prod_vars[name, iHr] + reserve <= \n on_vars[name, iHr] * gen_dict[name].eco_max[iHr], \n name=\"Ecomax\" + name + \"H\" + str(iHr) )\n #you need to be on in order to offer spinning reserve\n if (name, iHr, \"TMSR_Cap\") in reserve_vars:\n model.addConstr( reserve_vars[name, iHr, \"TMSR_Cap\"] <= \n on_vars[name, iHr] * gen_dict[name].TMSR_Cap, \n name=\"SpinningReserve\" + \"name\" + str(iHr) )", "def gaussian(dims: Tuple[int, int], cutoff_freq: float) -> np.ndarray:\n # create grid\n m, n = [(dim - 1) / 2 for dim in dims]\n yy, xx = np.ogrid[-m : m + 1, -n : n + 1]\n\n # compute transfer function\n tf = np.exp(-(np.power(xx, 2) + np.power(yy, 2)) / (2 * np.power(cutoff_freq, 2)))\n\n # normalize and return transfer func\n return (tf - np.max(tf)) / (np.max(tf) - np.min(tf))", "def make_uniform_x(self, x_resolution, min_x = None, max_x = None, bin_above = 2.0, **kwargs):\n \n if min_x is None or max_x is None:\n a, b = self.get_min_max_x(**kwargs)\n if min_x is None:\n min_x = a\n if max_x is None:\n max_x = b\n \n new_x = numpy.arange(min_x, max_x + x_resolution / 2, x_resolution)\n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n resolution = (numpy.amax(m.x) - numpy.amin(m.x)) / len(m.x)", "def func_full(x, c1, c2, c3, c4, c5, c6, c7):\n thermalCore = c1 * np.sqrt(x) * np.exp(-c2 * x)\n a = map(lambda y: 0 if y < c5 else 1, x)\n b = map(lambda y: 0 if y < c6 else 1, x)\n #b1 = map(lambda y: 1 - y, b)\n a = np.array(a)\n b = np.array(b)\n b1 = 1.0 - b\n #powerLaw = c3 * a * np.power(x, -c4) * (b1 + b * np.exp(-c7*(x-c6)))\n powerLaw = 0.001 * a * np.power(x, -c4) * b1\n return thermalCore + powerLaw" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct Artillery YAML configuration
def set_yaml_config(self) -> None:
    # LT-248: We can pick Artillery Phase configuration from conf file
    self.yaml_config = {
        "config": {
            "target": self.get_swagger_url(),
            "processor": f"./{self.OUT_FILE}",
            "phases": [
                {
                    "duration": settings.DURATION or 1,
                    "arrivalRate": settings.SPAWN_RATE or 1
                }
            ]
        },
        "scenarios": self.task_set.yaml_flow
    }
[ "def setupFromYml(self, yml):", "def __build_yaml(self):\n \n with open(self.mainConfigFile, \"r\") as f:\n self.configFiles = yaml.safe_load(f)\n\n self.yamlStream = \"# \" + self.find_file(self.configFiles['head'])+'\\n'\n with open(self.find_file(self.configFiles['head']), \"r\") as f:\n self.yamlStream = self.yamlStream + f.read() + '\\n'\n\n if 'definitions' in self.configFiles.keys():\n self.__append_yaml(self.configFiles['definitions'])\n\n if 'aircrafts' in self.configFiles.keys():\n self.yamlStream = self.yamlStream + \"aircrafts:\\n\"\n for filename in self.configFiles['aircrafts']:\n self.yamStream = self.yamlStream + ' - '\n self.__append_yaml(filename, prefix=' ')\n self.yamlStream = self.yamlStream + '\\n'", "def _make_builder_configs():\n release1 = TedliumReleaseConfig(\n name=\"release1\",\n description=\"\"\"\\\n The TED-LIUM corpus is English-language TED talks, with transcriptions,\n sampled at 16kHz. It contains about 118 hours of speech.\n\n This is the TED-LIUM corpus release 1,\n licensed under Creative Commons BY-NC-ND 3.0\n (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).\n \"\"\",\n citation=\"\"\"\\\n @inproceedings{rousseau2012tedlium,\n title={TED-LIUM: an Automatic Speech Recognition dedicated corpus},\n author={Rousseau, Anthony and Del{\\\\'e}glise, Paul and Est{\\\\`e}ve, Yannick},\n booktitle={Conference on Language Resources and Evaluation (LREC)},\n pages={125--129},\n year={2012}\n }\n \"\"\",\n url=\"https://www.openslr.org/7/\",\n download_url=\"http://www.openslr.org/resources/7/TEDLIUM_release1.tar.gz\",\n split_paths=[\n (tfds.Split.TRAIN, os.path.join(\"TEDLIUM_release1\", \"train\")),\n (tfds.Split.VALIDATION, os.path.join(\"TEDLIUM_release1\", \"dev\")),\n (tfds.Split.TEST, os.path.join(\"TEDLIUM_release1\", \"test\")),\n ],\n )\n\n release2 = TedliumReleaseConfig(\n name=\"release2\",\n description=\"\"\"\\\n This is the TED-LIUM corpus release 2,\n licensed under Creative Commons BY-NC-ND 3.0\n (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).\n\n All talks and text are property of TED Conferences LLC.\n\n The TED-LIUM corpus was made from audio talks and their transcriptions\n available on the TED website. 
We have prepared and filtered these data\n in order to train acoustic models to participate to the International\n Workshop on Spoken Language Translation 2011 (the LIUM English/French\n SLT system reached the first rank in the SLT task).\n\n Contains 1495 talks and transcripts.\n \"\"\",\n citation=\"\"\"\\\n @inproceedings{rousseau2014tedlium2,\n title={Enhancing the {TED-LIUM} Corpus with Selected Data for Language Modeling and More {TED} Talks},\n author={Rousseau, Anthony and Del{\\\\'e}glise, Paul and Est{\\\\`e}ve, Yannick},\n booktitle={Conference on Language Resources and Evaluation (LREC)},\n year={2014}\n }\n \"\"\",\n url=\"https://www.openslr.org/19/\",\n download_url=(\n \"http://www.openslr.org/resources/19/TEDLIUM_release2.tar.gz\"\n ),\n split_paths=[\n (tfds.Split.TRAIN, os.path.join(\"TEDLIUM_release2\", \"train\")),\n (tfds.Split.VALIDATION, os.path.join(\"TEDLIUM_release2\", \"dev\")),\n (tfds.Split.TEST, os.path.join(\"TEDLIUM_release2\", \"test\")),\n ],\n )\n\n release3 = TedliumReleaseConfig(\n name=\"release3\",\n description=\"\"\"\\\n This is the TED-LIUM corpus release 3, licensed under Creative Commons\n BY-NC-ND 3.0.\n\n All talks and text are property of TED Conferences LLC.\n\n This new TED-LIUM release was made through a collaboration between the\n Ubiqus company and the LIUM (University of Le Mans, France)\n\n Contents:\n\n - 2351 audio talks in NIST sphere format (SPH), including talks from\n TED-LIUM 2: be careful, same talks but not same audio files (only\n these audio file must be used with the TED-LIUM 3 STM files)\n - 452 hours of audio\n - 2351 aligned automatic transcripts in STM format\n - TEDLIUM 2 dev and test data: 19 TED talks in SPH format with\n corresponding manual transcriptions (cf. 'legacy' distribution below).\n - Dictionary with pronunciations (159848 entries), same file as the one\n included in TED-LIUM 2\n - Selected monolingual data for language modeling from WMT12 publicly\n available corpora: these files come from the TED-LIUM 2 release, but\n have been modified to get a tokenization more relevant for English\n language\n\n Two corpus distributions:\n - the legacy one, on which the dev and test datasets are the same as in\n TED-LIUM 2 (and TED-LIUM 1).\n - the 'speaker adaptation' one, especially designed for experiments on\n speaker adaptation.\n \"\"\",\n citation=\"\"\"\\\n @inproceedings{hernandez2018tedlium3,\n title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},\n author={Hernandez, Fran{\\\\c{c}}ois and Nguyen, Vincent and Ghannay, Sahar and Tomashenko, Natalia and Est{\\\\`e}ve, Yannick},\n booktitle={International Conference on Speech and Computer},\n pages={198--208},\n year={2018},\n organization={Springer}\n }\n \"\"\",\n url=\"https://www.openslr.org/51/\",\n download_url=\"http://www.openslr.org/resources/51/TEDLIUM_release-3.tgz\",\n split_paths=[\n (\n tfds.Split.VALIDATION,\n os.path.join(\"TEDLIUM_release-3\", \"legacy\", \"dev\"),\n ),\n (\n tfds.Split.TEST,\n os.path.join(\"TEDLIUM_release-3\", \"legacy\", \"test\"),\n ),\n # The legacy/train directory contains symlinks to \"data\",\n # which are skipped by extraction (see above).\n # Work around this by manually dereferencing the links here.\n (tfds.Split.TRAIN, os.path.join(\"TEDLIUM_release-3\", \"data\")),\n ],\n )\n\n return [release1, release2, release3]", "def yamlConfigForParsingPlugins():\n parameters = \"\"\"\njoinPaths: !joinPaths\n - a\n - b\n - \"c\"\nrunPageTemplates: !findRunPageTemplates\n - 
\"templates\"\nbcrypt: !bcrypt\n bcryptLogRounds: 12\n user: \"pass\"\nbcryptNoUser: !bcrypt\n bcryptLogRounds: 12\n null: null\nsecretKey: !secretKey 12345\nsecretKeyGen: !secretKey null\n \"\"\"\n # Load parameters\n parameters = yaml.load(parameters, Loader = yaml.SafeLoader)\n return parameters", "def create_yaml(self):\n if self._language == PYTHON:\n language_str = 'python'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._python_dependencies()\n elif self._language == NODE:\n language_str = 'node'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._node_dependencies()\n elif self._language == DOTNET:\n language_str = 'dotnet'\n package_route = '$(System.DefaultWorkingDirectory)/publish_output/s'\n dependencies = self._dotnet_dependencies()\n elif self._language == POWERSHELL:\n language_str = 'powershell'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._powershell_dependencies()\n else:\n raise LanguageNotSupportException(self._language)\n\n if self._app_type == WINDOWS:\n platform_str = 'windows'\n yaml = self._generate_yaml(dependencies, 'VS2017-Win2016', language_str, platform_str, package_route)\n else:\n platform_str = 'linux'\n yaml = self._generate_yaml(dependencies, 'ubuntu-16.04', language_str, platform_str, package_route)\n\n with open('azure-pipelines.yml', 'w') as f:\n f.write(yaml)", "def setup_yaml_parser():\n var = re.compile(r\".*\\$\\{.*\\}.*\", re.VERBOSE)\n yaml.add_constructor('!env_var', _env_var_constructor)\n yaml.add_implicit_resolver('!env_var', var)", "def generate_config():\n\n return {\n \"email_subject\": DEFAULT_EMAIL_SUBJECT,\n \"from_email\": DEFAULT_FROM_EMAIL,\n \"to_email\": DEFAULT_TO_EMAIL,\n \"url\": DEFAULT_URL,\n \"start_value\": DEFAULT_START_VALUE,\n \"look_ahead\": DEFAULT_LOOK_AHEAD,\n \"slide_window\": DEFAULT_SLIDE_WINDOW,\n }", "def build_configs():", "def user_create_yaml(self):\n pass", "def config_enclosure() -> dict:\n with open(get_test_file_path('pygeoapi-test-config-enclosure.yml')) as fh:\n return yaml_load(fh)", "def _generate_configs(self):\n return _generate_anchor_configs(self.min_level, self.max_level,\n self.num_scales, self.aspect_ratios)", "def get_yaml_editor(**kwargs: Any) -> YAML:\r\n explicit_start = kwargs.pop(\"explicit_start\", True)\r\n explode_aliases = kwargs.pop(\"explode_aliases\", False)\r\n preserve_quotes = kwargs.pop(\"preserve_quotes\", True)\r\n\r\n # The ruamel.yaml class appears to be missing some typing data, so\r\n # these valid assignments cannot be type-checked.\r\n yaml = YAML()\r\n\r\n yaml.indent(mapping=2, sequence=4, offset=2)\r\n yaml.explicit_start = explicit_start # type: ignore\r\n yaml.preserve_quotes = preserve_quotes # type: ignore\r\n yaml.width = maxsize # type: ignore\r\n\r\n if explode_aliases:\r\n yaml.default_flow_style = False\r\n\r\n return yaml", "def get_configured_yaml() -> ModuleType:\n import yaml\n\n from manubot.cite.csl_item import CSL_Item\n\n yaml.add_representer(str, _yaml_str_representer)\n # CSL_Item: pyyaml chokes on dict subclass\n # https://github.com/yaml/pyyaml/issues/142\n # https://stackoverflow.com/a/50181505/4651668\n yaml.add_representer(\n CSL_Item,\n lambda dumper, data: dumper.represent_mapping(\n tag=\"tag:yaml.org,2002:map\", mapping=data.items()\n ),\n )\n return yaml", "def _create_yaml_map(self):", "def celery_config() -> Dict:\n with open(script_dir + 'config.yml', 'r') as yamlfile:\n cfg = yaml.load(yamlfile, Loader=yaml.SafeLoader)\n celery_cfg = cfg['celery']\n 
result = {\n 'main': celery_cfg['main'],\n 'broker': celery_cfg['broker_url'],\n 'backend': celery_cfg['backend_url'],\n }\n return result", "def config_maker(args):\n\n dico = dict()\n dico['fastq'] = args.fastq\n dico['path_fast5'] = args.fast5\n if args.summary:\n dico['summary'] = args.summary\n else:\n dico['summary'] = False\n dico['splitted'] = args.splitted\n dico['reference'] = args.reference\n if args.out:\n dico['output'] = args.out\n\n return dico", "def init_yaml(self):\n f = open(\"content/\"+self.match_name+'.md',\"r\")\n text = f.read().split('---')[0]\n self.yaml = yaml.load(text, Loader=Loader)", "def _parse_yaml_configs(args, anon_component_prefix=\"anon_app\"):\n # Configuration files are basically nested dictionaries and the command-line arguments\n # are a list with each element being a dictionary. If the dict in the args has the key\n # 'class', then it is anonymous and we should just give it a sequential unique name to\n # ensure it is run. If, however, it does not, then we should assume that it's a NAMED\n # configuration and so we can actually use that to overwrite/modify the configurations\n # pulled in from a file.\n\n new_configs = {}\n for arg in args:\n try:\n arg = yaml.load(arg)\n except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:\n raise ValueError(\"error parsing manual configuration: %s\\nError:%s\" % (arg, e))\n\n # If this config is anonymous, give it a unique name and add it to configs\n # since it couldn't possibly overwrite another config entry.\n # NOTE: if user specified a 'name' entry directly, we will still take that later on...\n if 'class' in arg:\n # TODO: perhaps register these names somewhere to ensure uniqueness?\n global __scale_client_n_anon_apps_added__\n unique_key = anon_component_prefix + str(__scale_client_n_anon_apps_added__)\n __scale_client_n_anon_apps_added__ += 1\n new_configs[unique_key] = arg\n else:\n try:\n new_configs.update(arg)\n except TypeError as e:\n raise ValueError(\"error in your manual configuration: %s\\n\"\n \"couldn't be interpreted as a dict due to error: %s\" % (arg, e))\n\n return new_configs", "def devpiserver_genconfig(tw, config, argv, writer):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tell if a person is allergic to the given allergen.
def is_allergic_to(self, allergen):
    return allergen in self.list
[ "def is_allergen(self, is_allergen):\n\n self._is_allergen = is_allergen", "def is_girl(self):\n if self.gneder == self.GIRL: return True;", "def in_garden(obj):\n print(\"Searching the garden's random objects\")\n return obj in _random_objects", "def isrelatierekening(self, rekening):\n if type(rekening) == str or type(rekening) == unicode:\n return rekening in self.ledenrek or rekening in self.olvrek or rekening in self.externrek\n return rekening.naam in self.ledenrek or rekening.naam in self.olvrek or rekening.naam in self.externrek", "def update_certain_knowledge(self):\n NeedToCheck = True\n while NeedToCheck:\n NeedToCheck = False\n\n # look through each allergen_possibility, ignoring anything in known_allergens\n for this_allergen in [\n a\n for a in self.allergen_possibilities.keys()\n if a not in self.known_allergens\n ]:\n possibility_count = len(self.allergen_possibilities[this_allergen])\n if 1 == possibility_count:\n the_definite_ingredient = list(\n self.allergen_possibilities[this_allergen]\n )[0]\n # ok, let's go boys.. we've got a new fact!\n print(\n f\"We now know that ingredient {the_definite_ingredient} has the allergen {this_allergen}\"\n )\n # we can add this to known_allergens list\n self.known_allergens[this_allergen] = the_definite_ingredient\n\n # we must remove this ingredient as an option from all the other allergens in the known universe\n for target_allergen in self.allergen_possibilities:\n if this_allergen != target_allergen:\n if (\n the_definite_ingredient\n in self.allergen_possibilities[target_allergen]\n ):\n self.allergen_possibilities[target_allergen].remove(\n the_definite_ingredient\n )\n # and we need to go round again..\n NeedToCheck = True", "def add(self, alergen: Alergen) -> bool:\n pass", "def eligiblePresident(age,bornInHomeland):\n return (age>=35) and bornInHomeland", "def is_ligand(self):\n if any(LigandComponentAdaptor().fetch_by_residue_id(r.residue_id) for r in self.Residues):\n return True\n else:\n return False", "def lent_out(self):\n return self in Book.on_loan", "def is_fullgen(nom, gen, n=2): \n if nom[:n] == gen[:n]:\n return True\n else:\n if gen in ('oris'):\n return True\n else:\n return False", "def _bot_assigned_bell(self, bell: Bell) -> bool:\n return self._tower.is_bell_assigned_to(bell, self._user_name)", "def __in__(self, grilles):\n for grille in grilles:\n if self == grille:\n return 1\n return 0", "def is_bothell_student():\n return _is_member('uw_affiliation_bothell-student')", "def satisfies(self, reg):\n ### If no value, there is no need for filtering\n if self.getValues()==['']:\n return True\n affiliation = self.getValues()[0]\n return True if (affiliation == reg.getRepresentationType()[\"organizationRepresentative\"]) else False", "def is_lucky(chance):\n return randint(0, 100) <= chance", "async def get_guardian_email(guardian_id: UUID, angel_name: str) -> str:\n try:\n user = await User.get(id=guardian_id)\n except DoesNotExist:\n return False\n\n angels = await user.fetch_related(\"angels\")\n for angel in angels:\n if angel.name == angel_name:\n return user.email\n return False", "def test_check_birth(self):\n herb = Fa.Herbivore(weight=60, age=20)\n herb2 = Fa.Herbivore(weight=33.24, age=2)\n carn = Fa.Carnivore(weight=60, age=20)\n print(min(1, herb.p['gamma'] * herb.fitness*(4 - 1))) # 0.58\n assert herb.check_birth(1) is False\n rd.seed(11) # rd.random() = 0.45\n assert herb.check_birth(4) is True\n assert isinstance(herb.check_birth(40), bool)\n assert herb2.check_birth(100) is False\n assert 
carn.check_birth(6) is True", "def is_any_mentor_became_human(self):\n for mentor in self.mentors:\n if mentor.humanity_level >= 10:\n print(\"\\033[44m\"+mentor.first_name, mentor.last_name+\" called \"+ mentor.nickname+\" has become human \"\n \"Is ready to deliver to new Codecool facility!\", mentor.first_name, mentor.last_name,\n \"may the Force be with You!\\033[0m\")\n time.sleep(3)\n return True\n return False", "def _user_assigned_bell(self, bell: Bell) -> bool:\n return not self._bot_assigned_bell(bell)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This returns a single entry corresponding to the Directory Entity referred to by FolderEntityData. The returned string is given below (between Start and End) Start
def getFolderEntry(FolderEntityData):
    if FolderEntityData.Type not in ['IntermediateDir', 'ExperimentDir']:
        errprint('\nThe given EntityData does not represent the data of a directory')
        raise ValueError

    OutputLines = []
    OutputLines.append("FolderID : {UID}".format(UID=FolderEntityData.ID))
    OutputLines.append("ParentFolderID : {UID}".format(UID=FolderEntityData.ParentID))
    OutputLines.append("FolderType : {Type}".format(Type=FolderEntityData.Type))
    OutputLines.append("FolderTitle : {Title}".format(Title=FolderEntityData.Title))
    OutputLines.append("FolderDescription: |-2")
    OutputLines += [" "+Line for Line in FolderEntityData.Description.splitlines()]
    OutputLines.append("")

    return "\n".join(OutputLines)
[ "def getFolderItemName(self) -> unicode:\n ...", "def folder_key(title,folder_name=DEFAULT_FOLDER_NAME):\n #parameter order is reversed because of kwargs necessities :(\n #i dont use this atm\n return ndb.Key('Folder', folder_name,'File',title)", "def folder_key(self):\n return self._folder_key", "def directory_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"directory_id\")", "def _get_ds_name_folder_path(self, backing):\n vmdk_ds_file_path = self.volumeops.get_path_name(backing)\n (datastore_name,\n folder_path, _) = volumeops.split_datastore_path(vmdk_ds_file_path)\n return (datastore_name, folder_path)", "def fl_get_folder_name(ptr_flobject):\n _fl_get_folder_name = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_folder_name\",\n xfdata.STRING, [cty.POINTER(xfdata.FL_OBJECT)],\n \"\"\"const char * fl_get_folder_name(FL_OBJECT * ob)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.keep_elem_refs(ptr_flobject)\n retval = _fl_get_folder_name(ptr_flobject)\n if isinstance(retval, bytes):\n return retval.decode('utf-8')\n else: # str\n return retval", "def path(self):\n return self._dir_entry.path", "def ProjectFolderId(self):\n return self.raw_project_folder_data.get(\"ProjectFolderId\")", "def _getdescription(self):\r\n result = self._session.execute(\"query -t folder -n %s -i %s -u -f \\\"%%description\\\"\" % (self.name, self.instance))\r\n return result.output.strip()", "def get_relative_name(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetRelativeName', self.handle)", "def _get_root_metadata(self):\n r, rx_dict = self._do_request(\n 'get',\n http_server_utils.join_url_components(\n [self._api_drive_endpoint_prefix, 'listfolder']),\n params={'folderid': 0})\n return rx_dict['metadata']", "def _get_folder(self):\n # type: () -> str\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n response = self.connection.api_call(\n \"GET\", [\"v1\", \"resources\", self.id, \"folderpath\"], headers=headers\n )\n\n return response.json().get(\"path\")", "def folder_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"folder_id\")", "def _summarize_home_folder_into_string(self, folder_id):\n subfolders, files = self._get_home_folder_contents()\n \n str_list = [\n # List all subfolders\n *[str(index) + \". \" + item['title'] for index, item in enumerate(subfolders, 1)],\n \n # An empty space to separate folder from files\n \"\",\n \n # List all files\n *[str(index) + \". 
\" + item['title'] for index, item in enumerate(files, len(subfolders) + 1)],\n ]\n \n str_reply = \"\\n\".join(str_list)\n return str_reply", "def Directory(self) -> str:", "def get_dcim_folder(device_pidl, parent):\r\n device_name = parent.GetDisplayNameOf(device_pidl, shellcon.SHGDN_NORMAL)\r\n name = None\r\n pidl = None\r\n\r\n folder = parent.BindToObject(device_pidl, None, shell.IID_IShellFolder)\r\n try:\r\n top_dir_name = \"\"\r\n for pidl in folder.EnumObjects(0, shellcon.SHCONTF_FOLDERS):\r\n top_dir_name = folder.GetDisplayNameOf(pidl, shellcon.SHGDN_NORMAL)\r\n break # Only want to see the first folder.\r\n if top_dir_name != \"Internal Storage\":\r\n return None, None, device_name\r\n except pywintypes.com_error:\r\n return None, None, device_name # No problem, must not be an iPhone\r\n\r\n folder = folder.BindToObject(pidl, None, shell.IID_IShellFolder)\r\n for pidl in folder.EnumObjects(0, shellcon.SHCONTF_FOLDERS):\r\n name = folder.GetDisplayNameOf(pidl, shellcon.SHGDN_NORMAL)\r\n break # Only want to see the first folder.\r\n if name != \"DCIM\":\r\n logger.warning(\"%s's '%s' has '%s', not a 'DCIM' dir.\" %\r\n (device_name, top_dir_name, name))\r\n return None, None, device_name\r\n\r\n return pidl, folder, device_name", "def _lookup_used_entity_id(self, file_details):\n # Since this uses the response from POST to /files/ this will include the ancestors and not be\n # effected by exclude_response_fields that were used when listing the project\n name_parts = [ancestor['name'] for ancestor in file_details['ancestors']\n if ancestor['kind'] == KindType.folder_str]\n name_parts.append(file_details['name'])\n remote_path = RemotePath.add_leading_slash(os.sep.join(name_parts))\n return self.activity.remote_path_to_file_version_id[remote_path]", "def get_folder_short_name_for_location(self, location):\n _method_name = 'get_folder_short_name_for_location'\n _logger.entering(location.get_folder_path(), class_name=_class_name, method_name=_method_name)\n folder_dict = self.__get_dictionary_for_location(location, False)\n result = ''\n if SHORT_NAME in folder_dict:\n result = folder_dict[SHORT_NAME]\n _logger.exiting(class_name=_class_name, method_name=_method_name, result=result)\n return result", "def get_foldername(self):\n return self.logfolder" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This returns a single entry corresponding to the Experiment Entity referred to by ExpEntityData. The returned string is given below (between Start and End) Start
def getExperimentEntry(ExpEntityData):
    # Validate that ExpEntityData actually corresponds to an Experiment Entity
    if ExpEntityData.Type != 'Experiment':
        errprint("\nThe Entity Data does not represent the data of an experiment")
        raise ValueError

    OutputLines = []
    OutputLines.append("")
    OutputLines.append("- ID : {ID}".format(ID=ExpEntityData.ID))
    OutputLines.append(" Title : {Title}".format(Title=ExpEntityData.Title))
    OutputLines.append(" Description: |-2")
    OutputLines += [" "+Line for Line in ExpEntityData.Description.splitlines()]
    OutputLines.append("")
    OutputLines.append(
        "{0:#<100}".format("## End of Experiment {UID} ".format(UID=ExpEntityData.ID)))

    return "\n".join(OutputLines)
[ "def entity_description(self, eid):\n entities = self._load_entities()\n return entities[eid][\"description\"]", "def getEntity(self):\n\n fid = file(self.filename)\n entityre = re.compile(\"entity (\\w+) is\", re.IGNORECASE)\n\n matches = entityre.search(fid.read())\n self.entityname = matches.groups()[0]\n return self.entityname", "def print_entity(entity):\n print 'entity.original_text:', entity.original_text\n print 'entity.display_text:', entity.display_text\n print 'entity.display_html:', entity.display_html\n print 'entity.start_index:', entity.start_index\n print 'entity.end_index:', entity.end_index", "def entity_tostring(entity):\n\n metadata = \", \".join(['\"%s\": \"%s\"' % (key, value) for\n key, value in entity.metadata.items()])\n\n mentions = \", \".join(['\"%s\"' % mention for mention in entity.mentions])\n\n return ('{name: \"%s\",'\n ' type: \"%s\",'\n ' metadata: {%s},'\n ' salience: %s,'\n ' mentions: [%s]}') % (\n entity.name,\n entity.type,\n metadata,\n entity.salience,\n mentions)", "def _getEntityEndKey(entityId):\n return \"%s\\x1E\" % entityId", "def _getEntityStartKey(entityId):\n return \"%s\\x1D\" % entityId", "def extent(obj):\n return obj.get('startOffset', -1), obj.get('endOffset', -1)", "def describe_entry(e, fields):\n from operator import getitem\n from six.moves import reduce\n return \" \".join([reduce(getitem, f.split('.'), e)\n for f in fields])", "def main_entity_of_page(self) -> str:\n return self._main_entity_of_page", "def entity_name(self):\n return self.entity.name", "def details(self):\n return self.request(\"/details.json\")[\"Response\"][\"Data\"][\"Entity\"]", "def get_data(self):\n # Check if we have downloaded experiment list yet\n if not self.experiment_container:\n self.populate_from_db()\n # Retrieve experiment results from database\n measurement_container = list(map(list, zip(*self.experiment_container)))[1]\n #Return all experiment IDs and experimental data associated with this entity\n #replace db.Experiment.ID with db.Experiment.type to index by experiment names not experiment IDs\n stm = cfg.session.query(db.Experiment.type,db.Measurement.data)\\\n .select_from(db.Measurement) \\\n .join(db.Object).join(db.Entity).join(db.Experiment)\\\n .filter(db.Measurement.ID.in_(measurement_container))\n return stm.all()", "def test_entity(self):\n self.request.log(\"Hello World\", entities=(Entity(1337)(12, \"Demo\"),))\n self.request.end()\n entry = self.get_entry()\n assert 'entities' in entry\n assert len(entry['entities']) == 1\n assert entry['entities'][0] == dict(entity=1337, id=12, name=\"Demo\")", "def __str__(self) -> str:\n st = \"<Entity>: \\n{\\n\"\n for k, v in self._keys.items():\n if not isinstance(v, list):\n st += f\"\\t {k} = \\\"{v}\\\"\\n\"\n if self._fixup is not None:\n for k, v in self.fixup.items():\n st += f\"\\t ${k} = \\\"{v}\\\"\\n\"\n\n for out in self.outputs:\n st += f'\\t{out!s}\\n'\n st += \"}\\n\"\n return st", "def get_description(self):\n return \"It is an Entity.\"", "def entity_name(self) -> str:\n return self._entity_name", "def exp_metadata(self) -> LabExperiment:\n\n return self._exp_metadata", "def entity_name(self):\n return self.__entity_name", "def commandGetEntitySummary(entityClient: EntityClient, entity: str):\n demisto.debug('commandGetEntitySummary has been called.')\n\n result: Dict[str, Any] = entityClient.getEntitySummary(entity)\n\n prefix = 'Insight.Entity.Summary'\n key = 'summary'\n\n if not result:\n raise Exception(f'We receive an invalid response from the server ({result})')\n\n 
if key not in result:\n raise Exception(f'We receive an invalid response from the server (The response does not contains the key: {key})')\n\n if not result.get(key):\n return \"We could not find any result for Get Entity Summary.\"\n\n return CommandResults(\n outputs_prefix=prefix,\n outputs_key_field=key,\n outputs=result.get(key)\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get all the employees out of the database
def get_employees(self):
    from Employee import Employee
    cursor = self.dbconnect.get_cursor()
    cursor.execute('select * from employee')
    employees = list()
    for row in cursor:
        employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
        employees.append(employee)
    return employees
[ "def get_employees():\n employees = list()\n try:\n connection = DBConnection.getConnection()\n cursor = connection.cursor()\n cursor.execute(\"select * from employee;\")\n rows = cursor.fetchall()\n connection.commit()\n for data in rows:\n emp_id = data[0]\n name = data[1]\n designation_code = data[2]\n dob = data[3]\n day = dob.strftime(\"%d\")\n month = dob.strftime(\"%m\")\n year = dob.strftime(\"%Y\")\n salary = data[4]\n gender = data[5]\n indian = data[6]\n pan_no = data[7]\n aadhar = data[8]\n employee = Employee(emp_id, name, designation_code, day,\n month, year, salary, gender, indian, pan_no, aadhar)\n employees.append(employee)\n except Error as error:\n raise DataLayerError(message=error.msg)\n finally:\n try:\n if cursor.is_open():\n cursor.close()\n if connection.is_connected():\n connection.close()\n except:\n pass\n return employees", "def get_employees(self):\n self.employee_list = []\n try:\n employees = self.db['employees'].all()\n # loop through what we get back from DB\n for emp in self.db['employees']:\n self.employee_list.append(\n employee.Employee(int(emp['id']), str(emp['name']), str(emp['password']), int(emp['role'])))\n except:\n print(\"error\")\n self.statusbar.showMessage(\"Error loading employee data\", 4000)", "def get_employees(self):\n for employees in self.employees:\n return employees", "def get_all_employee():\r\n conn = sqlite3.connect('test2.db')\r\n try:\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT * FROM EMPLOYEES\")\r\n rows = cur.fetchall()\r\n employee_list = []\r\n for row in rows:\r\n employee_list.append(row)\r\n return employee_list\r\n except (sqlite3.ProgrammingError, sqlite3.Error) as e:\r\n print(\"TABLE EMPLOYEES not found\")\r\n except sqlite3.Error as er:\r\n print(\"Database error: \", er.message)\r\n except Exception as e:\r\n print(e)", "def getEmployees(self):\n return self.employees", "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def get_employees(self):\n return self.employees", "def employees(self):\n page = app.config['EMPLOYEES_PER_PAGE']\n emps = DeptEmp.query.filter_by(dept_no=self.dept_no)\n return [e.emp_no for e in current_records(emps, g.date)]", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def fourth_query():\n employees = []\n for name, surname, department_name, l_city, l_state_province in session.query(Employees.first_name,\n Employees.last_name,\n Departments.depart_name,\n Locations.city,\n Locations.state_province).filter(\n Employees.department_id == Departments.department_id, Departments.location_id == Locations.location_id).all():\n employees.append(f'{name}, {surname}, {department_name}, {l_city}, {l_state_province}')\n return employees", "def process_all_employees(self):\n return jsonify(encode_list(self.facade_service.get_all_employees()))", "def get_employees(cls, strategy=lazyload):\n cls._check_strategy(strategy)\n\n return db.session.query(Employee).options(\n 
strategy(Employee.department)\n ).all()", "def test_get_all_employees(self):\n with self.app.app_context():\n self.assertNotEqual(es.get_all(), [])", "def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list", "def seventh_query():\n employees = []\n for name, surname, salary in session.query(Employees.first_name, Employees.last_name, Employees.salary).filter(\n Employees.department_id == Departments.department_id, Departments.location_id == Locations.location_id).\\\n filter(Locations.city == 'London').all():\n employees.append(f'{name}, {surname}, {salary}')\n\n # Вариант №2\n # for name, surname, salary in session.query(Employees.first_name, Employees.last_name, Employees.salary).\\\n # join(Departments, Locations).filter(Locations.city == 'London').all():\n # employees.append(f'{name}, {surname}, {salary}')\n return employees", "def get_employees(self):\n if self.__employees == []:\n with open(\"./data/employees.csv\", \"r\") as employee_file:\n for line in employee_file.readlines():\n username, password = line.split(\",\")\n #new_employee = Employee(username, password)\n self.__employees.append(username) \n \n return self.__employees", "def get_employees_in_department(department_name: str) -> list:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT Employee.empid, Employee.name\n FROM Employee JOIN EmployeeDepartments USING(empid)\n WHERE EmployeeDepartments.department = %s\"\"\"\n cur.execute(sql, (department_name,))\n\n # Attempt to fetch all rows\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n employees = []\n for row in result:\n employees.append(\n [row[0], row[1]]\n )\n cur.close()\n conn.close()\n return employees\n except Exception as e:\n print(\"ooo\")\n print(e)\n # If nothing was returned, return empty list\n cur.close()\n conn.close()\n return []\n\n # TODO Dummy Data - Change to be useful!\n # Return the employees in the department.\n # Each \"row\" has: [ empid, name ]\n\n # employees = [\n # [15905, 'Rea Fibbings'],\n # [9438, 'Julia Norville'],\n # [36020, 'Adora Lansdowne'],\n # [98809, 'Nathanial Farfoot'],\n # [58407, 'Lynne Smorthit'],\n # ]\n #\n # return employees", "def employee(employee_id):\n # 
gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this function gets all the admins from the database
def get_admins(self):
    from Employee import Employee
    admins = list()
    cursorRoles = self.dbconnect.get_cursor()
    cursorRoles.execute('select * from employeeRoles where role=\'admin\'')
    for row in cursorRoles:
        admins.append(self.get_employee(row[0]))
    return admins
[ "def get_all_administrators():\n return User.objects.filter(groups__name=\"administrators\")", "def get_admins():\n users = get_users()\n admins = []\n for user in users:\n if user[\"approval_level\"] == \"admin\":\n admins.append(user)\n\n return admins", "def get_admins(name):\n obj = DataService.objects(name=name).first()\n if obj is None:\n return []\n return list(obj.admins)", "def get_admins(self):\n return self.admins_group.user_set.all()", "def list_admin() -> None:\n admin_users = list(User.objects(admin=True).scalar('email'))\n if admin_users:\n echo('Allowed admins are')\n for email in admin_users:\n echo('- %s' % email)\n else:\n echo('No admins found')\n\n users = list(User.objects(admin=False).scalar('email'))\n if users:\n echo('Rest of users are:')\n for email in users:\n echo('- %s' % email)", "async def _ad_list(self, ctx):\n admin_list = self.database.get_admins(ctx.guild.id)\n if len(admin_list) > 0:\n out = \"```\"\n for admin in admin_list:\n admin_name = self.bot.get_user(admin.user_id)\n admin_name = str(admin_name) if admin_name is not None else admin.user_id\n out += f\"{admin_name}\\n\"\n out += \"```\"\n await ctx.send(out)\n else:\n await ctx.send(\"This guild currently has no administrators.\")", "def get_admins(self):\n admins = User.objects.filter(Q(groups__name=self.admin_group_name()) | Q(is_superuser=True)).distinct()\n return admins", "async def _ad_all(self, ctx):\n all_admins = self.database.get_all_admins()\n consumed = []\n out = \"```\"\n for admin in all_admins:\n if admin.guild_id not in consumed:\n out += f\"Guild: {self.bot.get_guild(admin.guild_id)}\\n\"\n consumed.append(admin.guild_id)\n admin = self.bot.get_user(admin.user_id)\n admin = str(admin) if admin is not None else admin.user_id\n out += f\" {admin}\\n\"\n if out != \"```\":\n out += \"```\"\n await ctx.send(out)\n else:\n await ctx.send(\"No admins currently\")", "def get_all_admins(cls):\r\n try:\r\n # Create a list of all admins in db excluding the super admin.\r\n all_users = list(cls.admin_collection.find({\"username\":{\"$nin\":[parser.get('API', 'ADMIN_NAME')]}},\r\n { \"_id\":0, \"password\":0}))\r\n return all_users\r\n except errors.PyMongoError as e:\r\n ## TODO: Logging\r\n return False", "def getAllAdmins():\r\n if len(request.args) == 0:\r\n users = models.User.query.all()\r\n else:\r\n users = models.User.query.filter(\r\n models.User.Name.like((\"%\" + request.args[\"name\"] + \"%\")) if request.args[\"name\"] is not None else \"\").all()\r\n data = []\r\n for user in users:\r\n data.append({'id': user.id, 'mobile': user.mobile, 'state': user.state, 'isSuperAdmin': user.isSuperAdmin,\r\n 'name': user.Name})\r\n return jsonify(code=200, msg=\"获取管理员列表成功\", data=data)", "def getDevelAdmins(self):\n pass", "def get_list_of_admins() -> List[User]:\n return DBDiscussionSession.query(User).filter(User.group == Group.ADMIN).all()", "def return_admin_list(request):\n del request\n return return_user_list(Administrador)", "def admins(self):\n return User.objects.filter_by_role(role=Roles.GROUP_ADMIN, roles__group=self)", "def getDevelAdmins(self):\n\t\tpass", "def get_admin_users(self):\n for user in self.get_all_users():\n if user.is_admin:\n yield user", "def get_admins() -> Tuple[int, ...]:\n db = get_database_connection()\n admins = [int(admin_id) for admin_id in db.lrange('avito:admin_list', 0, -1)]\n return tuple(admins)", "def __update_admin_cache(self):\n\n header = connect(self.__path)\n curs = header.cursor()\n curs.execute(\"SELECT * FROM admins WHERE id IS NOT 
NULL\")\n data = curs.fetchall()\n newlist = []\n for item in data:\n newlist.append(item[0])\n self.__admins = newlist", "def get_local_admins():\n admin_list = get_users_config()\n response = []\n\n if \"users\" not in admin_list[\"result\"]:\n return response\n\n if isinstance(admin_list[\"result\"][\"users\"][\"entry\"], list):\n for entry in admin_list[\"result\"][\"users\"][\"entry\"]:\n response.append(entry[\"name\"])\n else:\n response.append(admin_list[\"result\"][\"users\"][\"entry\"][\"name\"])\n\n return response" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
gets a single employee out of the database by id
def get_employee(self, id):
    from Employee import Employee
    cursor = self.dbconnect.get_cursor()
    cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))
    row = cursor.fetchone()
    return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
[ "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def get(id_: int):\n logger.debug('Retrieving employee by id %i.', id_)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.id == id_\n ).scalar()\n if not employee:\n raise Exception(f\"Can't get employee with id {id_}\", )\n except Exception as exception:\n logger.error('An error occurred while retrieving employee with id %i.'\n ' Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by id %i.', id_)\n return employee", "def get_employee_by_id(employee_id):\n where = Employee.id == employee_id\n query = get_employee_query(where)\n return query.one()", "def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise", "def get(self, id):\n employee = self.query.get(id)\n return employee if employee and employee.is_deleted == False else None", "def get_employee_by_id(emp_id):\n if emp_id == None:\n raise DataLayerError(message=\"Employee ID Required\")\n if not isinstance(emp_id, int):\n raise DataLayerError(\n f\"Found type {type(emp_id)}, required type {type(0)}\")\n if emp_id <= 0:\n raise DataLayerError(f\"Invalid employee ID : {emp_id}\")\n try:\n connection = DBConnection.getConnection()\n cursor = connection.cursor()\n cursor.execute(\"select * from employee where emp_id=%s\", (emp_id,))\n row = cursor.fetchone()\n connection.commit()\n if row == None:\n raise DataLayerError(\n message=f\"Employee ID : {emp_id} does not exists\")\n emp_id = row[0]\n name = row[1]\n designation_code = row[2]\n dob = row[3]\n day = dob.strftime(\"%d\")\n month = dob.strftime(\"%m\")\n year = dob.strftime(\"%Y\")\n salary = row[4]\n gender = row[5]\n indian = row[6]\n pan_no = row[7]\n aadhar = row[8]\n employee = Employee(emp_id, name, designation_code, day,\n month, year, salary, gender, indian, pan_no, aadhar)\n except Error as error:\n raise DataLayerError(message=error.msg)\n finally:\n try:\n if cursor.is_open():\n cursor.close()\n if connection.is_connected():\n connection.close()\n except:\n pass\n return employee", "def find_employee_by_id(self,id):\n self.employee_id()\n if id in self.emp_id:\n print(self.emp_id[id])\n return self.emp_id[id]\n else:\n print(\"Employee not found\")", "def find_employee(id):\n for employee in emp_data:\n if employee.em_no == id:\n return employee\n return None", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def _get_employee(self):\n emp = self.env['hr.employee'].search([('attendance_code', '=', self.code)]) or False\n if emp:\n self.employee = emp[0].id", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, 
employee.last_name, employee.grade, payscale.salary))", "def get_employee_by_name(self, name):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE name=%s', (name,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])", "def get_employee(self, name):\n name = name.upper()\n if name in EMPLOYEE_MAP:\n name = EMPLOYEE_MAP[name]\n try:\n int(name)\n emps = Employee.objects.filter(id=name)\n except ValueError:\n if name == 'NN':\n emps = Employee.objects.filter(user__first_name='Nieznany')\n elif Employee.objects.filter(user__username__iexact=name).exists():\n emps = Employee.objects.filter(user__username__iexact=name)\n elif len(name) == 3:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:3],\n status=0)\n else:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:],\n status=0)\n if not emps:\n emps = Employee.objects.filter(user__username__istartswith=name)\n if len(emps) == 1:\n return emps[0]\n elif len(emps) > 1:\n self.stdout.write(self.style.ERROR('Multiple employee matches for {}. Choices are:'\n .format(name)))\n for e in emps:\n self.stdout.write(self.style.ERROR(' -{}'.format(e.user.get_full_name())))\n else:\n raise CommandError('Employee {} does not exists! Fix your input file.'.format(name))\n\n return None", "def obtener_empresa(id):\r\n try:\r\n return Empresa.objects.get(id=id)\r\n except Exception,e:\r\n return None", "def get_employeeOnName(self, name):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE name=%s ', (name,))\n if (cursor.rowcount != 0):\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n else:\n return None", "def employers_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=EMPLOYER_TYPE_URI,\n rdf_type_name=EMPLOYER_TYPE_NAME, \n kls=Employer)", "def lookup(cls, id: int):\n record = query_db(\n \"select id, amount, description, user_id from expenses where id = ?\",\n [id],\n one=True,\n )\n if record is None:\n raise NotFound()\n return cls(**record)", "def test_api_can_get_employee_by_id(self):\n res = self.client().get(service_url_emp+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))", "def get_examen(self, id_examen):\n\n self.logger.info(\"\\t[+] get_examen [+]\")\n self.logger.info(f\"\\t[+] id_examen {id_examen} [+]\")\n try:\n return self.examens.select().where(self.examens.columns.id_examen == id_examen).execute()\n except Exception as e:\n self.logger.critical(\"\\t[-] Exception occured [-]\")\n self.logger.critical(\"\\t\" + str(e))\n self.logger.critical(\"\\t[-] Exception occured [-]\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
gets a single employee out of the database by name
def get_employeeOnName(self, name):
    from Employee import Employee
    cursor = self.dbconnect.get_cursor()
    cursor.execute('SELECT * FROM employee WHERE name=%s ', (name,))
    if (cursor.rowcount != 0):
        row = cursor.fetchone()
        return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
    else:
        return None
[ "def get_employee_by_name(self, name):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE name=%s', (name,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])", "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])", "def get_employee(self, name):\n name = name.upper()\n if name in EMPLOYEE_MAP:\n name = EMPLOYEE_MAP[name]\n try:\n int(name)\n emps = Employee.objects.filter(id=name)\n except ValueError:\n if name == 'NN':\n emps = Employee.objects.filter(user__first_name='Nieznany')\n elif Employee.objects.filter(user__username__iexact=name).exists():\n emps = Employee.objects.filter(user__username__iexact=name)\n elif len(name) == 3:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:3],\n status=0)\n else:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:],\n status=0)\n if not emps:\n emps = Employee.objects.filter(user__username__istartswith=name)\n if len(emps) == 1:\n return emps[0]\n elif len(emps) > 1:\n self.stdout.write(self.style.ERROR('Multiple employee matches for {}. Choices are:'\n .format(name)))\n for e in emps:\n self.stdout.write(self.style.ERROR(' -{}'.format(e.user.get_full_name())))\n else:\n raise CommandError('Employee {} does not exists! Fix your input file.'.format(name))\n\n return None", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise", "def get_employee_by_id(emp_id):\n if emp_id == None:\n raise DataLayerError(message=\"Employee ID Required\")\n if not isinstance(emp_id, int):\n raise DataLayerError(\n f\"Found type {type(emp_id)}, required type {type(0)}\")\n if emp_id <= 0:\n raise DataLayerError(f\"Invalid employee ID : {emp_id}\")\n try:\n connection = DBConnection.getConnection()\n cursor = connection.cursor()\n cursor.execute(\"select * from employee where emp_id=%s\", (emp_id,))\n row = cursor.fetchone()\n connection.commit()\n if row == None:\n raise DataLayerError(\n message=f\"Employee ID : {emp_id} does not exists\")\n emp_id = row[0]\n name = row[1]\n designation_code = row[2]\n dob = row[3]\n day = dob.strftime(\"%d\")\n month = dob.strftime(\"%m\")\n year = dob.strftime(\"%Y\")\n salary = row[4]\n gender = row[5]\n indian = row[6]\n pan_no = 
row[7]\n aadhar = row[8]\n employee = Employee(emp_id, name, designation_code, day,\n month, year, salary, gender, indian, pan_no, aadhar)\n except Error as error:\n raise DataLayerError(message=error.msg)\n finally:\n try:\n if cursor.is_open():\n cursor.close()\n if connection.is_connected():\n connection.close()\n except:\n pass\n return employee", "def get(id_: int):\n logger.debug('Retrieving employee by id %i.', id_)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.id == id_\n ).scalar()\n if not employee:\n raise Exception(f\"Can't get employee with id {id_}\", )\n except Exception as exception:\n logger.error('An error occurred while retrieving employee with id %i.'\n ' Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by id %i.', id_)\n return employee", "def get_employee_by_id(employee_id):\n where = Employee.id == employee_id\n query = get_employee_query(where)\n return query.one()", "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def _get_employee(self):\n emp = self.env['hr.employee'].search([('attendance_code', '=', self.code)]) or False\n if emp:\n self.employee = emp[0].id", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def check_employee(self, employee_name):\n for employee in self.employees:\n if employee.name == employee_name:\n return employee", "def find_employee(id):\n for employee in emp_data:\n if employee.em_no == id:\n return employee\n return None", "def get_employees():\n employees = list()\n try:\n connection = DBConnection.getConnection()\n cursor = connection.cursor()\n cursor.execute(\"select * from employee;\")\n rows = cursor.fetchall()\n connection.commit()\n for data in rows:\n emp_id = data[0]\n name = data[1]\n designation_code = data[2]\n dob = data[3]\n day = dob.strftime(\"%d\")\n month = dob.strftime(\"%m\")\n year = dob.strftime(\"%Y\")\n salary = data[4]\n gender = data[5]\n indian = data[6]\n pan_no = data[7]\n aadhar = data[8]\n employee = Employee(emp_id, name, designation_code, day,\n month, year, salary, gender, indian, pan_no, aadhar)\n employees.append(employee)\n except Error as error:\n raise DataLayerError(message=error.msg)\n finally:\n try:\n if cursor.is_open():\n cursor.close()\n if connection.is_connected():\n connection.close()\n except:\n pass\n return employees", "def get_name(self, name):\n\n self.curr.execute(''' SELECT * FROM parties WHERE name=%s''', (name,))\n party = self.curr.fetchone()\n self.conn.commit()\n self.curr.close()\n return party", "def get(self, name, user):\n connection = self.connect()\n cursor = connection.cursor()\n cursor.execute(self.sql[\"get\"], {\"name\": name, \"user\": user})\n result = cursor.fetchone()\n if result is not None:\n return result[0].split()\n else:\n raise DoesNotExistException(\n \"Could not find an applicable saved roll with that name.\"\n )", "def get(self, id):\n employee = self.query.get(id)\n return employee if employee and employee.is_deleted == False else None", "def lookup_by_employee(self):\n self.clear_console()\n print(self.format_header('Lookup by Employee'))\n\n get_employee_names = Entry.select().distinct(Entry.employee_name).execute()\n employee_names = set()\n [employee_names.add(employee.employee_name.title()) for employee in get_employee_names]\n # allow 
the user to choose from a name\n [print(name) for name in employee_names]\n while True:\n chosen_name = input('Choose an employee: ').strip()\n if chosen_name == '':\n print('Please choose from the list of available names, or type \"back\" to return to lookup menu')\n continue\n # get the number of matches...\n name_matches = ConsoleUI.get_matches(chosen_name, employee_names)\n if len(name_matches) > 1:\n # clarify...\n while True:\n print(self.clear_console())\n print('Multiple matches:')\n [print(name) for name in name_matches]\n specific_name = input('Choose an exact name, or enter \"all\" to get all matches: ').lower().strip()\n if specific_name == 'all':\n # return all results\n entries = Entry.select().order_by(Entry.created_timestamp.desc())\n entries = entries.where(fn.Lower(Entry.employee_name).contains(chosen_name.lower()))\n self.display_one_at_a_time(entries)\n return True\n elif specific_name.title() in name_matches:\n # return the specific name results\n entries = Entry.select().order_by(Entry.created_timestamp.desc())\n entries = entries.where(fn.Lower(Entry.employee_name) == specific_name.lower())\n self.display_one_at_a_time(entries)\n return True\n elif len(name_matches) == 1:\n # run the query\n entries = Entry.select().order_by(Entry.created_timestamp.desc())\n entries = entries.where(fn.Lower(Entry.employee_name) == chosen_name.lower())\n self.display_one_at_a_time(entries)\n return True\n elif chosen_name == 'Back':\n break\n else:\n # no matches...\n print('Please choose from the list of available names, or type \"back\" to return to lookup menu')", "def test_get_existing_employee(self):\n with self.app.app_context():\n self.assertTrue(es.get_by_name('Mary'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }