text: string (lengths 15 – 7.82k)
ids: sequence (lengths 1 – 7)
def METHOD_NAME(self, s=None, b=None, connection=None, returnCursor=False):
    """
    _executemanybinds_

    b is a list of dictionaries for the binds, e.g.:
    b = [ {'bind1':'value1a', 'bind2': 'value2a'},
          {'bind1':'value1b', 'bind2': 'value2b'} ]

    see: http://www.gingerandjohn.com/archives/2004/02/26/cx_oracle-executemany-example/

    Can't executemany() selects - so do each combination of binds here instead.

    Returns a list of sqlalchemy.engine.base.ResultProxy objects, one for each
    set of binds.
    """
    s = s.strip()
    if s.lower().endswith('select', 0, 6):
        # Trying to select many
        if returnCursor:
            result = []
            for bind in b:
                result.append(connection.execute(s, bind))
        else:
            result = ResultSet()
            for bind in b:
                resultproxy = connection.execute(s, bind)
                result.add(resultproxy)
                resultproxy.close()
        return self.makelist(result)
    # Now inserting or updating many
    result = connection.execute(s, b)
    return self.makelist(result)
[ -1 ]
async def METHOD_NAME(next_link=None):
    request = prepare_request(next_link)

    _stream = False
    pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=_stream, **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    return pipeline_response
[ 19, 243 ]
def METHOD_NAME(self): pass
[ 164, 41, 8869 ]
def METHOD_NAME(tag: Dict[str, Any]) -> bool: return re.match(r"^[0-9]+-[0-9]+-[a-z0-9]+$", tag["name"]) is not None
[ 250, 82 ]
def METHOD_NAME(l):
    for x in range(l.numberOfMothers()):
        mom = l.mother(x)
        if mom.status() > 2:
            return True
        id = abs(mom.pdgId())
        if id > 1000000:
            return True
        if id > 100:
            return False
        if id < 6:
            return False
        if id == 21:
            return False
        if id in [11, 12, 13, 14, 15, 16]:
            if l.status() > 2:
                return True
            return METHOD_NAME(mom)
        if id >= 22 and id <= 39:
            return True
    return True
[ 137, 130, 280, -1, 13886 ]
def METHOD_NAME(self):
    """
    rx_sync_start_available: Returns a list of possible keys used for rx_sync_start
    """
    try:
        return self._get_iio_dev_attr_str(
            "sync_start_enable_available", _ctrl=self._rxadc
        )
    except:  # noqa: E722
        return "arm"
[ 2068, 164, 447, 1272 ]
def METHOD_NAME(self) -> Sequence[Sequence[str]]:
    """
    The rule baseline result
    """
    return pulumi.get(self, "results")
[ 51 ]
def METHOD_NAME():
    # Connecting ClearML with the current process,
    # from here on everything is logged automatically
    task = Task.init(project_name='examples', task_name='Remote_execution PyTorch MNIST train')

    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(os.path.join('..', 'data'), train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(os.path.join('..', 'data'), train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

    for epoch in range(1, args.epochs + 1):
        if epoch > 1:
            # We run training for 1 epoch to make sure nothing crashes; then local
            # execution is terminated. Execution will switch to remote execution
            # by the agent listening to the specified queue.
            task.execute_remotely(queue_name="default")
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader, epoch)

    if args.save_model:
        torch.save(model.state_dict(), os.path.join(gettempdir(), "mnist_cnn_remote.pt"))
[ 57 ]
def METHOD_NAME(next_link=None):
    if not next_link:
        request = build_list_metrics_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            filter=filter,
            api_version=api_version,
            template_url=self.list_metrics.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

    else:
        # make call to next link with the client's api-version
        _parsed_next_link = urllib.parse.urlparse(next_link)
        _next_request_params = case_insensitive_dict(
            {
                key: [urllib.parse.quote(v) for v in value]
                for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
            }
        )
        _next_request_params["api-version"] = self._config.api_version
        request = HttpRequest(
            "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        request.method = "GET"
    return request
[ 123, 377 ]
def METHOD_NAME():
    #
    # Technically we don't need to call alias_flavor() here (since it's
    # already been invoked for this REPO_CFG variable), but we do it
    # anyway to support `fail-on-flavor-aliasing` testing. Basically,
    # alias_flavor() will fail() if flavor aliasing is disabled and we
    # try to return an aliased flavor.
    #
    return alias_flavor(REPO_CFG.flavor_default)
[ 19, 3032, 235 ]
def METHOD_NAME(self, a):
    net = a.network()
    agent1 = a.replicator(network=net)
    agent2 = a.replicator(network=net)
    agent3 = a.replicator(network=net)
    agent1.connect(direction="to", whom=agent2)
    agent1.connect(direction="to", whom=agent3)
    info = a.info(origin=agent1)

    agent1.transmit(what=models.Info, to_whom=nodes.Agent)
    agent2.receive()
    agent3.receive()

    assert agent1.infos()[0].contents == agent2.infos()[0].contents
    assert agent1.infos()[0].contents == agent3.infos()[0].contents
    assert agent1.infos()[0].id != agent2.infos()[0].id != agent3.infos()[0].id

    transmissions = info.transmissions()
    assert len(transmissions) == 2
[ 9, 1849, 5022, 8211, 24, 13273 ]
def METHOD_NAME(self) -> bytes:
    """Reads 4 bytes and increases cursor"""
    if self.cursor + 4 > self.length_input:
        raise ValueError(
            "BMA Layer NRL Compressor: Reached EOF while reading data."
        )
    oc = self.cursor
    self.cursor += 4
    return read_bytes(self.uncompressed_data, oc, 4)
[ 203 ]
def METHOD_NAME(s: str):
    if s.endswith("."):
        return FAIL
    return OK
[ 4812, 688 ]
def METHOD_NAME(
    db_session: Session,
    function_calls: List[ContractFunctionCall],
    blockchain_type: AvailableBlockchainType,
    label_name=CRAWLER_LABEL,
) -> None:
    label_model = get_label_model(blockchain_type)

    transactions_hashes_to_save = [
        function_call.transaction_hash for function_call in function_calls
    ]

    existing_labels = (
        db_session.query(label_model.transaction_hash)
        .filter(
            label_model.label == label_name,
            label_model.log_index == None,
            label_model.transaction_hash.in_(transactions_hashes_to_save),
        )
        .all()
    )

    existing_labels_transactions = [label[0] for label in existing_labels]

    labels_to_save = [
        _function_call_to_label(blockchain_type, function_call)
        for function_call in function_calls
        if function_call.transaction_hash not in existing_labels_transactions
    ]

    logger.info(f"Saving {len(labels_to_save)} labels to session")
    db_session.add_all(labels_to_save)
[ 238, 559, 1929, 24, 240 ]
def METHOD_NAME(self, data, padding=None, hashAlg=None, saltLen=None):
    """
    :type data: bytearray
    :param data: The value which will be signed (generally a binary
        encoding of hash output).

    :type padding: str
    :param padding: Ignored, present for API compatibility with RSA

    :type hashAlg: str
    :param hashAlg: name of hash that was used for calculating the bytes

    :type saltLen: int
    :param saltLen: Ignored, present for API compatibility with RSA
    """
    N = numBits(self.q)
    digest_len = len(data) * 8
    digest = bytesToNumber(data)
    if N < digest_len:
        digest >>= digest_len - N

    k = getRandomNumber(1, (self.q-1))
    if gmpyLoaded or GMPY2_LOADED:
        k = mpz(k)
        digest = mpz(digest)

    r = powMod(self.g, k, self.p) % self.q
    s = invMod(k, self.q) * (digest + self.private_key * r) % self.q
    return encode_sequence(encode_integer(r), encode_integer(s))
[ 2452 ]
def METHOD_NAME(self):
    self._events.append('startTestRun')
    super(_BaseLoggingResult, self).METHOD_NAME()
[ 447, 9, 22 ]
def METHOD_NAME(self, input):
    print("SIGNER state #3")
    u = self.group.random()
    s = self.group.random()
    d = self.group.random()
    g = input.get('g')
    y = input.get('y')

    str = "info"
    msg = integer(SHA2(str))
    z = (msg ** ((p - 1)/q)) % p

    a = g ** u
    b = (g ** s) * (z ** d)

    Protocol.store(self, ('u', u), ('s', s), ('d', d))
    Protocol.setState(self, 5)
    return { 'a':a, 'b':b, 's':s }
[ 3529, 3530 ]
def METHOD_NAME(cls, spec, value): return cls.class_map[spec['type']](spec, value)
[ 280, 1457 ]
def METHOD_NAME(authorization_provider_id: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                service_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAuthorizationProviderResult]:
    """
    Gets the details of the authorization provider specified by its identifier.
    Azure REST API version: 2022-08-01.

    :param str authorization_provider_id: Identifier of the authorization provider.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str service_name: The name of the API Management service.
    """
    ...
[ 19, 1355, 2275, 146 ]
def METHOD_NAME():
    """
    test to_dict function for ShipDrift object

    create a new ship_drift object and make sure it has same properties
    """
    new_wind = ShipDriftMover(wind_file, topology_file, grid_type=2)
    serial = new_wind.serialize()
    nw2 = ShipDriftMover.deserialize(serial)

    assert new_wind == nw2
[ 9, 183, 2696 ]
def METHOD_NAME():
    from litex.build.parser import LiteXArgumentParser
    parser = LiteXArgumentParser(platform=xilinx_zcu104.Platform, description="LiteX SoC on ZCU104.")
    parser.add_target_argument("--sys-clk-freq", default=125e6, type=float, help="System clock frequency.")
    args = parser.parse_args()

    soc = BaseSoC(
        sys_clk_freq = args.sys_clk_freq,
        **parser.soc_argdict
    )
    builder = Builder(soc, **parser.builder_argdict)
    if args.build:
        builder.build(**parser.toolchain_argdict)

    if args.load:
        prog = soc.platform.create_programmer()
        prog.load_bitstream(builder.get_bitstream_filename(mode="sram"))
[ 57 ]
def METHOD_NAME(s: str):
    level_mapping = StorageLevel.__members__
    level_strings = [ss.strip() for ss in s.upper().split("|")]
    levels = []
    for ls in level_strings:
        if ls not in level_mapping:  # pragma: no cover
            raise ValueError(f"Unknown level {ls}")
        levels.append(level_mapping[ls])
    return functools.reduce(operator.or_, levels)
[ 280, 3 ]
def METHOD_NAME(expr, vx, vy, data, fref):
    n = len(data)
    A = te.placeholder((n,), name="A", dtype=expr.dtype)
    B = te.placeholder((n,), name="B", dtype=expr.dtype)

    def make_binds(i):
        x = expr
        x = tvm.tir.Let(vx, A[i], x)
        x = tvm.tir.Let(vy, B[i], x)
        return x

    C = te.compute((n,), make_binds)
    s = te.create_schedule([C.op])

    f = tvm.build(s, [A, B, C], "llvm")

    a = tvm.nd.array(np.array([x for x, y in data], dtype=expr.dtype))
    b = tvm.nd.array(np.array([y for x, y in data], dtype=expr.dtype))
    c = tvm.nd.array(np.zeros(len(data), dtype=expr.dtype))
    f(a, b, c)
    cref = np.array([fref(x, y) for x, y in data])
    np.testing.assert_equal(c.numpy(), cref)
[ 250, 99 ]
def METHOD_NAME(i, cai):
    for sec in s:
        sec.cai = cai
    h.finitialize(-65)
    while h.t < 15.0:
        h.fadvance()
    plt(i)
[ -1 ]
def METHOD_NAME(builder, authid): return HelloNewAddAuthid(builder, authid)
[ 238, 13814 ]
def METHOD_NAME(cls, url, **kwargs):
    """
    Create an :class:`Media` from a URL.

    :code:`Media.from_url(url)` is equivalent to:

    .. code-block:: python

        med = Media(value=url, format='url')

    But both unicode and bytes arguments are allowed for ``url``.

    Parameters
    ----------
    url: [str, bytes]
        The location of a URL to load.
    """
    if isinstance(url, str):
        # If str, it needs to be encoded to bytes
        url = url.encode('utf-8')

    return cls(value=url, format='url', **kwargs)
[ 280, 274 ]
def METHOD_NAME(jars_1: List, jars_2: List) -> bool:
    """
    Checks if two lists of jar files contain the same jars. The order of the
    jars in the list does not matter.

    Args:
        jars_1 (List): A list of jar files.
        jars_2 (List): A list of jar files.

    Returns:
        bool: True if the lists contain the same jars, False otherwise.
    """
    if jars_1 is None and jars_2 is None:
        return True
    if jars_1 is None or jars_2 is None:
        return False
    if len(jars_1) != len(jars_2):
        return False
    file_names_1 = get_file_names(jars_1)
    file_names_2 = get_file_names(jars_2)
    return set(file_names_1) == set(file_names_2)
[ 1992, 1101, 2692 ]
def METHOD_NAME(self) -> Optional["QObject"]: return self._createViewFromQML()
[ 19, 52, 1024 ]
def METHOD_NAME(string):
    """Escape all regular expressions special characters from STRING."""
    return re.escape(string)
[ 211, 4748 ]
def METHOD_NAME(start, duration):
    start_time = datetime.datetime.now().replace(hour=start, minute=0)
    return start_time + datetime.timedelta(hours=duration // 60, minutes=duration % 60)
[ -1, 1798, 104 ]
def METHOD_NAME(fname, dtypes=None):
    """Read a tsv file into an OrderedDict.

    Parameters
    ----------
    fname : str
        Path to the file being loaded.
    dtypes : list, optional
        List of types to cast the values loaded as. This is specified column by
        column. Defaults to None. In this case all the data is loaded as strings.

    Returns
    -------
    data_dict : collections.OrderedDict
        Keys are the column names, and values are the column data.
    """
    from .utils import warn  # avoid circular import

    data = np.loadtxt(
        fname, dtype=str, delimiter="\t", ndmin=2, comments=None, encoding="utf-8-sig"
    )
    column_names = data[0, :]
    info = data[1:, :]
    data_dict = OrderedDict()
    if dtypes is None:
        dtypes = [str] * info.shape[1]
    if not isinstance(dtypes, (list, tuple)):
        dtypes = [dtypes] * info.shape[1]
    if not len(dtypes) == info.shape[1]:
        raise ValueError(
            "dtypes length mismatch. Provided: {0}, "
            "Expected: {1}".format(len(dtypes), info.shape[1])
        )
    empty_cols = 0
    for i, name in enumerate(column_names):
        values = info[:, i].astype(dtypes[i]).tolist()
        data_dict[name] = values
        if len(values) == 0:
            empty_cols += 1

    if empty_cols == len(column_names):
        warn(f"TSV file is empty: '{fname}'")

    return data_dict
[ 280, 2255 ]
def METHOD_NAME(scope, transaction_style, request):
    # type: (Scope, str, Any) -> None
    name = ""

    if transaction_style == "endpoint":
        endpoint = request.scope.get("endpoint")
        if endpoint:
            name = transaction_from_function(endpoint) or ""

    elif transaction_style == "url":
        route = request.scope.get("route")
        if route:
            path = getattr(route, "path", None)
            if path is not None:
                name = path

    if not name:
        name = _DEFAULT_TRANSACTION_NAME
        source = TRANSACTION_SOURCE_ROUTE
    else:
        source = SOURCE_FOR_STYLE[transaction_style]

    scope.set_transaction_name(name, source=source)
    logger.debug(
        "[FastAPI] Set transaction name and source on scope: %s / %s", name, source
    )
[ 0, 1853, 156, 61, 1458 ]
def METHOD_NAME(self):
    response = self.client.get(reverse("home"))
    self.assertRedirects(response, self.project.get_absolute_url())
[ 9, 97, 155, 1231 ]
def METHOD_NAME(self):
    for record in self:
        country_code = record.country_id.code or ""
        if record.cpf and country_code.upper() == "BR":
            cpf = misc.punctuation_rm(record.cpf)
            if not cnpj_cpf.validar(cpf):
                raise ValidationError(_("Invalid CPF!"))
    return True
[ 250, 1370 ]
def METHOD_NAME(self): return base64.b64encode(self.serialize()).decode("utf8")
[ 24, 2426 ]
def METHOD_NAME(self):
    for record in self:
        self._event("on_record_unlink").notify(record)
    result = super(Base, self).METHOD_NAME()
    return result
[ 4384 ]
def METHOD_NAME(self):
    ...
[ 9, 756 ]
def METHOD_NAME(StrParI1, StrParI2):
    IntLoc = 1
    while IntLoc <= 1:
        if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
            CharLoc = 'A'
            IntLoc = IntLoc + 1
    if CharLoc >= 'W' and CharLoc <= 'Z':
        IntLoc = 7
    if CharLoc == 'X':
        return TRUE
    else:
        if StrParI1 > StrParI2:
            IntLoc = IntLoc + 7
            return TRUE
        else:
            return FALSE
[ 7640 ]
def METHOD_NAME(chan0, chan1, phase_correction):
    assert len(chan0) == len(chan1)
    (p, s) = measure_phase_and_delay(chan0, chan1)
    # print("Across Chips Sample delay: ",s)
    # print("Phase delay: ",p,"(Degrees)")
    # print(phase_correction)
    return (sub_phases(phase_correction, [int(p * 1000)] * 4), s)
[ 599, 61, 270, 3200, 1540 ]
def METHOD_NAME(settings, max_blocks_per_call=10000):
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])

    height = settings['min_height']
    while height < settings['max_height']+1:
        num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
        batch = []
        for x in range(num_blocks):
            batch.append(rpc.build_request(x, 'getblockhash', [height + x]))

        reply = rpc.execute(batch)
        if reply is None:
            print('Cannot continue. Program will halt.')
            return None

        for x,resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
                sys.exit(1)
            assert(resp_obj['id'] == x)  # assume replies are in-sequence
            if settings['rev_hash_bytes'] == 'true':
                resp_obj['result'] = hex_switchEndian(resp_obj['result'])
            print(resp_obj['result'])
        height += num_blocks
[ 19, 573, 2012 ]
def METHOD_NAME() -> dict:
    parser = ArgumentParser()
    add_required_arguments(parser)
    add_optional_arguments(parser)
    return vars(parser.METHOD_NAME())
[ 214, 335 ]
def METHOD_NAME() -> bool:
    """
    Whether the logging outputs returned by `forward` can be summed
    across workers prior to calling `reduce_metrics`. Setting this
    to True will improve distributed training speed.
    """
    return False
[ 663, 141, 1046, 673, 8599 ]
def METHOD_NAME(self, manifest): return manifest.get('config').get("digest").split(":")[1]
[ 19, 660, 200, 171 ]
def METHOD_NAME(self):
    logger.hr(f'{self.FUNCTION_NAME_BASE}{self.battle_count}', level=2)
    prev = self.battle_count
    result = False
    for _ in range(10):
        try:
            result = self.battle_function()
            break
        except MapEnemyMoved:
            if self.battle_count > prev:
                result = True
                break
            else:
                continue

    if not result:
        logger.warning('ScriptError, No combat executed.')
        if self.config.Error_HandleError:
            logger.warning('ScriptError, No combat executed, Withdrawing')
            self.withdraw()
        else:
            raise ScriptError('No combat executed.')

    return result
[ 750, 385, 1505 ]
def METHOD_NAME(self): self._testInsertGlyph(setGlyphName=False)
[ 9, 0, 6634, 41, 156, 98 ]
def METHOD_NAME(gherkin_languages_path, output_file=None, encoding=None, verbose=False):
    """Workhorse. Performs the conversion from "gherkin-languages.json" to "i18n.py".

    Writes output to file or console (stdout).

    :param gherkin_languages_path: File path for JSON file.
    :param output_file: Output filename (or STDOUT for: None, "stdout", "-")
    :param encoding: Optional output encoding to use (default: UTF-8).
    :param verbose: Enable verbose mode (as bool; optional).
    """
    if encoding is None:
        encoding = "UTF-8"

    # -- STEP 1: Load JSON data.
    json_encoding = "UTF-8"
    languages = json.load(open(gherkin_languages_path, encoding=json_encoding))
    languages = data_normalize(languages, verbose=verbose)
    # languages = yaml_normalize(languages)

    # -- STEP 2: Generate python module with i18n data.
    header = u'''# -*- coding: {encoding} -*-
[ 16673, 2539, 24, 440, 298 ]
def METHOD_NAME(self) -> None:
    """Close connection"""
    self.conn.close()
[ 72, 783 ]
def METHOD_NAME(private_cloud_name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                segment_id: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkloadNetworkSegmentResult]:
    """
    NSX Segment

    :param str private_cloud_name: Name of the private cloud
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str segment_id: NSX Segment identifier. Generally the same as the Segment's display name
    """
    ...
[ 19, 2454, 1228, 4373, 146 ]
def METHOD_NAME(data):
    input_quantizer = model._bn.input_quantizers[0]

    if isinstance(input_quantizer, StaticGridTensorQuantizer):
        return input_quantizer.quantize_dequantize(data, input_quantizer.round_mode)

    assert isinstance(input_quantizer, LearnedGridTensorQuantizer)
    encoding = input_quantizer.encoding
    encoding_min = torch.tensor([encoding.min])
    encoding_max = torch.tensor([encoding.max])
    return input_quantizer.quantize_dequantize(data, encoding_min, encoding_max)
[ 1429, 362 ]
def METHOD_NAME(A: dace.float64[10, 5, 3]): return np.mean(A, axis=-2)
[ 9, 314, 2927 ]
def METHOD_NAME(self, func, use_previous_behavior=True, preserves_partitioning=False, **kwargs):
    if use_previous_behavior is True:
        LOGGER.warning(f"please use `applyPartitions` instead of `mapPartitions` "
                       f"if the previous behavior was expected. "
                       f"The previous behavior will not work in future")
        return self.applyPartitions(func)

    return Table(self._rp.map_partitions(func, options={"shuffle": not preserves_partitioning}))
[ 422, 1031 ]
def METHOD_NAME(self):
    info = self.find_indextype(self.SPEC_HEADER)
    _, _, offset, length = self.sections()[info]
    with open(self.filename, 'rb') as f:
        f.seek(offset)
        dataType, numPoints, xUnits, yUnits, firstX, lastX, noise = \
            struct.unpack('<iiiifff', f.read(28))
    return numPoints, firstX, lastX,
[ 203, 1457, 572 ]
def METHOD_NAME(self: SharedUtils) -> list: return range_expand(default(get(self.switch_data_combined, "mlag_interfaces"), get(self.default_interfaces, "mlag_interfaces"), []))
[ 13016, 703 ]
def METHOD_NAME(self):
    file = open("example.glsl")
    shader_sourcecode = file.read()
    size = self.width, self.height
    self.shadertoy = Shadertoy(size, shader_sourcecode)

    self.channel0 = self.shadertoy.ctx.framebuffer(
        color_attachments=[self.shadertoy.ctx.texture(size, components=4)]
    )
    self.shadertoy.channel_0 = self.channel0.color_attachments[0]

    self.channel1 = self.shadertoy.ctx.framebuffer(
        color_attachments=[self.shadertoy.ctx.texture(size, components=4)]
    )
    self.shadertoy.channel_1 = self.channel1.color_attachments[0]
[ 557, 1871 ]
def METHOD_NAME(cls, token):
    """Get the AccessRequestToken referenced by the specified token."""
    return cls.query.filter_by(token=token).one_or_none()
[ 19, 604, 466 ]
def METHOD_NAME(*args, **kwargs):
    func_to_call = get_func_to_call()
    logger.info("running %s()...", func.__name__)
    try:
        test_map[func.__name__] = dict()
        test_map[func.__name__]["result"] = SUCCESSED
        test_map[func.__name__]["error_message"] = ""
        test_map[func.__name__]["error_stack"] = ""
        test_map[func.__name__]["error_normalized"] = ""
        test_map[func.__name__]["start_dt"] = dt.datetime.utcnow()
        ret = func_to_call(*args, **kwargs)
    except (AssertionError, AzureError, CliTestError, CliExecutionError, SystemExit,
            JMESPathCheckAssertionError) as e:
        use_exception_cache = os.getenv("TEST_EXCEPTION_CACHE")
        if use_exception_cache is None or use_exception_cache.lower() != "true":
            raise
        test_map[func.__name__]["end_dt"] = dt.datetime.utcnow()
        test_map[func.__name__]["result"] = FAILED
        test_map[func.__name__]["error_message"] = str(e).replace("\r\n", " ").replace("\n", " ")[:500]
        test_map[func.__name__]["error_stack"] = traceback.format_exc().replace(
            "\r\n", " ").replace("\n", " ")[:500]
        logger.info("--------------------------------------")
        logger.info("step exception: %s", e)
        logger.error("--------------------------------------")
        logger.error("step exception in %s: %s", func.__name__, e)
        logger.info(traceback.format_exc())
        exceptions.append((func.__name__, sys.exc_info()))
    else:
        test_map[func.__name__]["end_dt"] = dt.datetime.utcnow()
    return ret
[ 291 ]
def METHOD_NAME(parameters_string):
    parameters = []
    for parameter_string in split_parameters_string(parameters_string):
        match = re.search(r'\s*(?:\[(?P<attributes>.*?)\]\s+)?(?P<type_and_name>.*)', parameter_string)
        attributes_string, type_and_name_string = match.group('attributes', 'type_and_name')
        split = type_and_name_string.rsplit(' ', 1)
        parameter_kind = 'class'
        if split[0].startswith('struct '):
            parameter_kind = 'struct'
            split[0] = split[0][7:]
        elif split[0].startswith('enum:'):
            parameter_kind = split[0][:split[0].find(' ')]
            split[0] = split[0][split[0].find(' ') + 1:]
        parameter_type = split[0]
        parameter_name = split[1]
        parameters.append(model.Parameter(kind=parameter_kind, type=parameter_type,
                                          name=parameter_name,
                                          attributes=parse_attributes_string(attributes_string)))
    return parameters
[ 214, 386, 144 ]
def METHOD_NAME(
    self, connection, engine, Base, User
):
    create_view(
        name='trivial_view',
        selectable=sa.select(*_select_args(User.id)),
        metadata=Base.metadata,
    )
    Base.metadata.create_all(engine)

    view = CreateView(
        name='trivial_view',
        selectable=sa.select(*_select_args(User.id)),
        replace=True,
    )
    with connection.begin():
        connection.execute(view)

    Base.metadata.drop_all(engine)
[ 9, 1717, 3351, 369, 1153 ]
def METHOD_NAME(self) -> int: return self.y
[ 1635 ]
def METHOD_NAME(self): return self.get("/installation/repositories")
[ 19, 2223 ]
def METHOD_NAME(self, node):
    """
    Swap out Python's AnnAssign with an Assign node where the annotation function is called.

    Example:
        Original: y: Tensor_Type(1,2,3, Dyn) = f2(x)
        Output:   y = annotate(f2(x), Tensor_Type((1,2,3,Dyn)))
    """
    return ast.Assign(targets=[node.target], value=ast.Call(
        func=ast.Name(id='annotate', ctx=ast.Load()),
        args=[node.value, node.annotation], keywords=[]))
[ 716, 5165, 1283 ]
def METHOD_NAME(self) -> Optional[str]:
    """
    The user that created the API key.
    """
    return pulumi.get(self, "created_by")
[ 152, 604 ]
def METHOD_NAME(self):
    """
    Get the description (shown below the title).

    Defaults to ``None``, which means that no description is rendered.
    """
    return None
[ 19, 1067 ]
def METHOD_NAME(self):
    metric = WordMetric(mode='ignore_case_symbol')
    metric.process(None, self.pred)
    eval_res = metric.evaluate(size=3)
    self.assertEqual(eval_res['recog/word_acc_ignore_case_symbol'], 1.0)
[ 9, 2236, 7774, 684, 331, 1608, 1341 ]
def METHOD_NAME():
    summary_modified_field = '_current_version__autoapprovalsummary__modified'
    # We don't take deleted reports into account
    valid_abuse_report_states = (
        AbuseReport.STATES.UNTRIAGED,
        AbuseReport.STATES.VALID,
        AbuseReport.STATES.SUSPICIOUS,
    )
    recent_abuse_reports_subquery = AbuseReport.objects.filter(
        state__in=valid_abuse_report_states,
        created__gte=OuterRef(summary_modified_field),
        guid=OuterRef('guid'),
    )
    return [
        # Only recalculate add-ons that received recent abuse reports,
        # possibly through their authors.
        Q(
            Exists(recent_abuse_reports_subquery),
        )
        | Q(
            authors__abuse_reports__state__in=valid_abuse_report_states,
            authors__abuse_reports__created__gte=F(summary_modified_field),
        )
        # And check ratings that have a rating of 3 or less
        | Q(
            _current_version__ratings__deleted=False,
            _current_version__ratings__created__gte=F(summary_modified_field),
            _current_version__ratings__rating__lte=3,
        )
    ]
[ 19, 9425, 2002, 469 ]
def METHOD_NAME(self, mock_echo):
    result = self.run_command(
        ["remote-build", "--launchpad-accept-public-upload", "--recover"]
    )

    self.assertThat(result.exit_code, Equals(0))
    self.mock_lc_init.assert_called_once_with(
        project=mock.ANY,
        architectures=mock.ANY,
        deadline=mock.ANY,
        build_id="snapcraft-test-snap-fakehash123",
    )
[ 9, 2437, 56, 2986, 1916, 4551, 1161 ]
def METHOD_NAME():
    git_version = subprocess.Popen(
        ["git", "--version"],
        shell=False,
        close_fds=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    ).communicate()[0]
    if not git_version:
        log.error("Git not installed")
        return False
    log.debug("Detected git version %s", git_version)
    return Version(git_version.split()[-1])
[ 1493, 281 ]
def METHOD_NAME(self, new_values, profile_name):
    # The access_key/secret_key are now *always* written to the shared
    # credentials file (~/.aws/credentials), see aws/aws-cli#847.
    # post-conditions: ~/.aws/credentials will have the updated credential
    # file values and new_values will have the cred vars removed.
    credential_file_values = {}
    if 'aws_access_key_id' in new_values:
        credential_file_values['aws_access_key_id'] = new_values.pop(
            'aws_access_key_id')
    if 'aws_secret_access_key' in new_values:
        credential_file_values['aws_secret_access_key'] = new_values.pop(
            'aws_secret_access_key')
    if credential_file_values:
        if profile_name is not None:
            credential_file_values['__section__'] = profile_name
        shared_credentials_filename = os.path.expanduser(
            self._session.get_config_variable('credentials_file'))
        self._config_writer.update_config(
            credential_file_values, shared_credentials_filename)
[ 77, 1737, 6471, 171, 199 ]
def METHOD_NAME(domain: str, message: str) -> str: ...
[ -1 ]
def METHOD_NAME(self, id_):
    # See the comment for RadioSelect.id_for_label()
    if id_:
        id_ += "_0"
    return id_
[ 147, 43, 636 ]
def METHOD_NAME(ws_app: WebSocketApp, msg: str):
    # We strongly trust that the contract on API will hold atm :D
    event_dict = json.loads(msg)
    labels = _LogEventLabels(**event_dict.get("labels", {}))
    if "message" in event_dict:
        message = event_dict["message"]
        timestamp = dateutil.parser.isoparse(event_dict["timestamp"])
        event = _LogEvent(
            message=message,
            timestamp=timestamp,
            component_name=component_name,
            labels=labels,
        )
        read_queue.put(event)
[ 1076 ]
def METHOD_NAME(self, schema):
    query = "SELECT database, table, name FROM system.columns WHERE database NOT IN ('system')"
    results, error = self.run_query(query, None)

    if error is not None:
        self._handle_run_query_error(error)

    results = json_loads(results)
    for row in results["rows"]:
        table_name = "{}.{}".format(row["database"], row["table"])
        if table_name not in schema:
            schema[table_name] = {"name": table_name, "columns": []}
        schema[table_name]["columns"].append(row["name"])

    return list(schema.values())
[ 19, 2253 ]
def METHOD_NAME(self): self.run_test(ntime=1024, nchan=32, max_delay=1, batch_shape=(7,9,5))
[ 9, 9835, -1, -1, 14803, 1136, 822 ]
def METHOD_NAME(self, temperature):
    """
    Sets the low threshold temperature of thermal

    Args :
        temperature: A float number up to nearest thousandth of one
            degree Celsius, e.g. 30.125

    Returns:
        A boolean, True if threshold is set successfully, False if not
    """
    raise NotImplementedError
[ 0, 3420, 853 ]
def METHOD_NAME(self) -> str:
    """
    The name of the resource
    """
    return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME():
    """Returns a platform-specific root directory for user config settings."""
    # On Windows, prefer %LOCALAPPDATA%, then %APPDATA%, since we can expect the
    # AppData directories to be ACLed to be visible only to the user and admin
    # users (https://stackoverflow.com/a/7617601/1179226). If neither is set,
    # return None instead of falling back to something that may be world-readable.
    if os.name == "nt":
        appdata = os.getenv("LOCALAPPDATA")
        if appdata:
            return appdata
        appdata = os.getenv("APPDATA")
        if appdata:
            return appdata
        return None
    # On non-windows, use XDG_CONFIG_HOME if set, else default to ~/.config.
    xdg_config_home = os.getenv("XDG_CONFIG_HOME")
    if xdg_config_home:
        return xdg_config_home
    return os.path.join(os.path.expanduser("~"), ".config")
[ 19, 21, 200, 2851 ]
def METHOD_NAME():
    models = []
    models.extend(
        [
            VertaModelNoImpl,
            VertaModelOnlyInit,
            VertaModelOnlyPredict,
        ]
    )
    return models
[ 6600, 10884, 379 ]
def METHOD_NAME(self): return QueryVersion(display=self.display, opcode=self.display.get_extension_major(extname), major_version=1, minor_version=1)
[ 539, 281 ]
def METHOD_NAME():
    '''Test that an instance of MetaRefElementArgMetadata can be created
    successfully. Also check the input value with mixed case.

    '''
    ref_element_arg = MetaRefElementArgMetadata("Normals_To_Faces")
    assert isinstance(ref_element_arg, MetaRefElementArgMetadata)
    assert ref_element_arg.reference_element == "normals_to_faces"
[ 9, 129 ]
def METHOD_NAME(self): self.assertEqual(utils.format_datetime(self.naive_dt), self.datestring + ' -0000')
[ 9, 4806, 884 ]
def METHOD_NAME(self): return self.client.format_url( "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AzureStackHCI/clusters/{clusterName}/arcSettings/{arcSettingName}/createArcIdentity", **self.url_parameters )
[ 274 ]
def METHOD_NAME(source_dir: Path) -> Path:
    setup_py = source_dir / "setup.py"
    setup_py.write_text(
        "from setuptools import setup; "
        'setup(name="demo", '
        'version="0.1.0", '
        'install_requires=["package"])'
    )
    return source_dir
[ 2660, 102 ]
def METHOD_NAME(self) -> None:
    asset_name = "wiki.en.vec"
    asset_path = get_asset_path(asset_name)
    with tempfile.TemporaryDirectory() as dir_name:
        data_path = os.path.join(dir_name, asset_name)
        shutil.copy(asset_path, data_path)
        vector_transform = VectorTransform(FastText(root=dir_name, validate_file=False))
        jit_vector_transform = torch.jit.script(vector_transform)

        # The first 3 entries in each vector.
        expected_fasttext_simple_en = torch.tensor(
            [[-0.065334, -0.093031, -0.017571], [-0.32423, -0.098845, -0.0073467]]
        )

        self.assertEqual(vector_transform(["the", "world"])[:, 0:3], expected_fasttext_simple_en)
        self.assertEqual(jit_vector_transform(["the", "world"])[:, 0:3], expected_fasttext_simple_en)
[ 9, 798, 1053 ]
def METHOD_NAME() -> None: ...
[ 7100 ]
def METHOD_NAME(self):
    ...
[ 19, 53, 1173, 578 ]
def METHOD_NAME(self):
    ...
[ 9, 4360, 1922, 651 ]
def METHOD_NAME(agent_plugin_service, flask_client, error):
    agent_plugin_service.install_plugin_archive = MagicMock(side_effect=error)

    resp = flask_client.put(
        get_url_for_resource(InstallAgentPlugin),
        data=AGENT_PLUGIN,
        follow_redirects=True,
    )

    assert resp.status_code == HTTPStatus.INTERNAL_SERVER_ERROR
[ 9, 428, 2793, 808, 2026, 163, 168 ]
def METHOD_NAME(db, info):
    FakeModel = get_fake_model(model_base=models.UUIDModel)

    class Serializer(serializers.ModelSerializer):
        class Meta:
            model = FakeModel
            fields = "__all__"

    class CustomMutation1(Mutation):
        class Meta:
            serializer_class = Serializer

    class CustomMutation2(Mutation):
        class Meta:
            serializer_class = Serializer

    class CustomValidation(BaseValidation):
        @validation_for(CustomMutation1)
        @validation_for(CustomMutation2)
        def validate_custom_mutation(self, mutation, data, info):
            data["test"] = "test"
            return data

    data = CustomValidation().validate(CustomMutation1, {}, info)
    assert data["test"] == "test"
    data = CustomValidation().validate(CustomMutation2, {}, info)
    assert data["test"] == "test"
[ 9, 343, 437, 5580, 4263 ]
def METHOD_NAME(self):
    """Sequence: ensure ability to assign a Dataset to a Sequence item"""
    ds = Dataset()
    ds.add_new((1, 1), "IS", 1)

    # Create a single element Sequence first
    seq = Sequence(
        [
            Dataset(),
        ]
    )
    seq[0] = ds

    assert ds == seq[0]
[ 9, 1205, 776 ]
def METHOD_NAME(
    df: Union[str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame],
    column: str = "",
) -> Union[bool, pd.Series, pd.DataFrame]:
    """
    Validate if a data cell is CPJ in a DataFrame column. For each cell, return True or False.

    Parameters
    ----------
    df
        A pandas or Dask DataFrame containing the data to be validated.
    column
        The name of the column to be validated.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(cpj.is_valid)
    elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if column != "":
            return df[column].apply(cpj.is_valid)
        else:
            return df.applymap(cpj.is_valid)
    return cpj.is_valid(df)
[ 187, 3630, -1 ]
def METHOD_NAME(x, is_training):
    # Reduce hw by avg and max
    # Return cat([avg_pool_0, avg_pool_1, ..., max_pool_0, max_pool_1, ...])
    if not isinstance(x, (list, tuple)):
        return avg_max_reduce_hw_helper(x, is_training)
    elif len(x) == 1:
        return avg_max_reduce_hw_helper(x[0], is_training)
    else:
        res_avg = []
        res_max = []
        for xi in x:
            avg, max = avg_max_reduce_hw_helper(xi, is_training, False)
            res_avg.append(avg)
            res_max.append(max)
        res = res_avg + res_max
        return paddle.concat(res, axis=1)
[ 1654, 232, 332, 3354 ]
def METHOD_NAME(sents, args):
    g2p = G2p()
    out_sents = []
    res_wrds = load_reserve_word(args.reserve_word)
    for sent in sents:
        col1 = ""
        if args.reserve_first_column:
            col1, sent = sent.split(None, 1)
        sent = process_sent(sent, g2p, res_wrds, args)
        if args.reserve_first_column and col1 != "":
            sent = f"{col1} {sent}"
        out_sents.append(sent)
    return out_sents
[ 356, 8138 ]
def METHOD_NAME(self):
    if isinstance(self._monitor_address, tuple):
        moncdev = "socket,id=mon,host=%s,port=%s" % (
            self._monitor_address[0],
            self._monitor_address[1])
    else:
        moncdev = 'socket,id=mon,path=%s' % self._monitor_address
    return ['-chardev', moncdev,
            '-mon', 'chardev=mon,mode=control',
            '-display', 'none', '-vga', 'none']
[ 414, 335 ]
def METHOD_NAME(self, agent: AgentID) -> ObsType | None:
    if not self._has_reset:
        EnvLogger.error_observe_before_reset()
    return super().METHOD_NAME(agent)
[ 6427 ]
def METHOD_NAME(self) -> Optional[List[ParameterBuilder]]: return self._validation_parameter_builders
[ 437, 511, 8881 ]
def METHOD_NAME(
    monitor: zmq.asyncio.Socket, loop: asyncio.BaseEventLoop
) -> None:
    """A thread that prints events

    This is a convenience method. It could serve as an example for your code of
    a monitor. For example, if you don't need the prints, copy this part of the
    code into your code and modify it to your needs.

    Parameters:
        monitor: a zmq monitor socket, from calling:
            my_zmq_socket.get_monitor_socket()
        loop: an asyncio event loop, from calling
            zmq.asyncio.asyncio.get_event_loop(); when starting a thread it
            does not contain an event loop
    """
    print("libzmq-%s" % zmq.zmq_version())
    if zmq.zmq_version_info() < (4, 0):
        raise RuntimeError("monitoring in libzmq version < 4.0 is not supported")

    EVENT_MAP = {}
    print("Event names:")
    for name in dir(zmq):
        if name.startswith('EVENT_'):
            value = getattr(zmq, name)
            print("%21s : %4i" % (name, value))
            EVENT_MAP[value] = name
    print("\n")

    asyncio.set_event_loop(loop)

    async def run_loop() -> None:
        while True:
            try:
                while monitor.poll():
                    evt: Dict[str, Any] = {}
                    mon_evt = await recv_monitor_message(monitor)
                    evt.update(mon_evt)
                    evt['description'] = EVENT_MAP[evt['event']]
                    print(f"Event: {evt}")
                    if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
                        break
            except RuntimeError as e:
                print(e)
                time.sleep(1)

        monitor.close()
        print()
        print("event monitor thread done!")

    asyncio.ensure_future(run_loop())
[ 417, 1863, 600, 958 ]
def METHOD_NAME(fn, rate: float, deterministic: bool = False):
    def attn_fn(scope: Scope, weights: Array):
        attn_weights = fn(scope, weights)
        return nn.dropout(
            scope, attn_weights, deterministic=deterministic, rate=rate
        )

    return attn_fn
[ 41, 3663 ]
def METHOD_NAME(self):
    self.assertMarkdownRenders(
        """![Text](http://link.com/".png'title') more text""",
        """<p><img alt="Text" src="http://link.com/&quot;.png" title="title" /> more text</p>"""
    )
[ 9, 1474, 2893, -1 ]
def METHOD_NAME(self): pass
[ 447 ]
def METHOD_NAME(self) -> None:
    """Close and reopen our log file, if supported. This should be overridden where needed."""
    return  # pragma: no cover
[ 18233 ]