text: string, lengths 15 to 7.82k
ids: sequence, lengths 1 to 7
def METHOD_NAME(self):
    # Test example from page 4 of
    # https://www.haystack.mit.edu/tech/vlbi/mark5/docs/230.3.pdf
    stream_hex = '0000 002D 0330 0000' + 'FFFF FFFF' + '4053 2143 3805 5'
    self.crc_hex = '284'
    self.crc12 = CRC(0x180f)
    self.crcstack12 = CRCStack(0x180f)
    self.stream_hex = stream_hex.replace(' ', '').lower()
    self.stream = int(self.stream_hex, base=16)
    self.bitstream = self.hex_to_stream(self.stream_hex)
    self.crc = int(self.crc_hex, base=16)
    self.crcstream = self.hex_to_stream(self.crc_hex)
[ 102, 2 ]
def METHOD_NAME(tokenizer, train_batch_size, eval_batch_size):
    # Load dataset
    train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"])

    # Preprocess train dataset
    train_dataset = train_dataset.map(
        lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True
    )
    train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"])
    train_features = {
        x: train_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length])
        for x in ["input_ids", "attention_mask"]
    }
    tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"]))

    # Preprocess test dataset
    test_dataset = test_dataset.map(
        lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True
    )
    test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"])
    test_features = {
        x: test_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length])
        for x in ["input_ids", "attention_mask"]
    }
    tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"]))

    if SDP_ENABLED:
        tf_train_dataset = tf_train_dataset.shard(sdp.size(), sdp.rank())
        tf_test_dataset = tf_test_dataset.shard(sdp.size(), sdp.rank())
    tf_train_dataset = tf_train_dataset.batch(train_batch_size, drop_remainder=True)
    tf_test_dataset = tf_test_dataset.batch(eval_batch_size, drop_remainder=True)

    return tf_train_dataset, tf_test_dataset
[ 19, 4146 ]
def METHOD_NAME(self): return self.METHOD_NAME
[ 803, 3547 ]
async def METHOD_NAME(data, objectService):
    """We can upload a small amount of literal data"""
    name = f"taskcluster/test/client-py/{taskcluster.slugid.v4()}"
    await upload.uploadFromBuf(
        projectId="taskcluster",
        name=name,
        contentType="text/plain",
        contentLength=len(data),
        expires=taskcluster.fromNow('1 hour'),
        data=data,
        objectService=objectService)
    got, contentType = await download.downloadToBuf(
        name=name,
        objectService=objectService)
    assert got == data
    assert contentType == 'text/plain'
[ 74, 3217, 8915, 6107, 9 ]
def METHOD_NAME(self, name): return self.has_callback(name) and self.num_callbacks(name) > 0
[ 5334, 1076 ]
def METHOD_NAME(y: Float64Array, x: Float64Array) -> Float64Array:
    """
    Projection of y on x from y

    Parameters
    ----------
    y : ndarray
        Array to project (nobs by nseries)
    x : ndarray
        Array to project onto (nobs by nvar)

    Returns
    -------
    ndarray
        Projected values of y (nobs by nseries)
    """
    if x.shape[1] == 0:
        return np.zeros_like(y)
    return x @ (np.linalg.pinv(x) @ y)
[ 5786 ]
def METHOD_NAME(current_actor_context):
    """
    Report if there's configuration for a type we don't recognize.
    """
    current_actor_context.feed(IfCfg(
        filename="/NM/ifcfg-pigeon0",
        properties=(IfCfgProperty(name="TYPE", value="AvianCarrier"),)
    ))
    current_actor_context.feed(INITSCRIPTS_AND_NM_INSTALLED)
    current_actor_context.run()
    assert len(current_actor_context.consume(Report)) == 1
    report_fields = current_actor_context.consume(Report)[0].report
    assert is_inhibitor(report_fields)
    assert "unsupported device types" in report_fields['title']
[ 9, 17622, 46, 44 ]
def METHOD_NAME(self): """test label formatter for histogram for None""" formatted_label = self.histogram._format_bin_labels(None) assert formatted_label == "null and up"
[ 9, 6069, 636, 2931, 98 ]
def METHOD_NAME(self): return all( self.datasets[key].METHOD_NAME for key in self.datasets )
[ 1466, 1047, 261, 568 ]
def METHOD_NAME() -> None:
    """
    Main function
    """
    # Dir path
    if len(sys.argv) != 2:
        sys.stderr.write(f'Usage: {sys.argv[0]} <setup_dir>\n')
        exit(2)
    dirpath = sys.argv[1]

    # Dir non existence
    if os.path.exists(dirpath):
        sys.stderr.write(f'Directory: {dirpath} already exists.\n')
        exit(1)

    generate_setup_dir(dirpath)
[ 57 ]
def METHOD_NAME(scene):
    img = ImageMobject(
        np.uint8([[63, 0, 0, 0], [0, 127, 0, 0], [0, 0, 191, 0], [0, 0, 0, 255]]),
    )
    img.height = 2
    img1 = img.copy()
    img2 = img.copy()
    img3 = img.copy()
    img4 = img.copy()
    img5 = img.copy()
    img1.set_resampling_algorithm(RESAMPLING_ALGORITHMS["nearest"])
    img2.set_resampling_algorithm(RESAMPLING_ALGORITHMS["lanczos"])
    img3.set_resampling_algorithm(RESAMPLING_ALGORITHMS["linear"])
    img4.set_resampling_algorithm(RESAMPLING_ALGORITHMS["cubic"])
    img5.set_resampling_algorithm(RESAMPLING_ALGORITHMS["box"])
    scene.add(img1, img2, img3, img4, img5)
    [s.shift(4 * LEFT + pos * 2 * RIGHT) for pos, s in enumerate(scene.mobjects)]
    scene.wait()
[ 9, 660, 4239 ]
def METHOD_NAME(devName):
    devName = resolveDevName(devName)
    return os.path.exists(os.path.join("/sys/devices/virtual/block/", devName))
[ 137, 162, 398 ]
def METHOD_NAME(elec_txt_dataframe):
    """Parse keys from EIA series_id string."""
    input_ = elec_txt_dataframe.iloc[[2], :]
    expected = pd.DataFrame(
        {
            "series_code": ["RECEIPTS_BTU"],
            "fuel_agg": ["NG"],
            "geo_agg": ["US"],
            "sector_agg": ["2"],
            "temporal_agg": ["Q"],
        },
        index=[2],
    )
    actual = bulk._extract_keys_from_series_id(input_)
    pd.testing.assert_frame_equal(actual, expected)
[ 9, 297, 219, 280, 4045, 147 ]
def METHOD_NAME(self, context, data_dict, fields_types, query_dict):
    '''Modify queries made on datastore_delete

    The overall design is that every IDatastore extension will receive the
    ``query_dict`` with the modifications made by previous extensions, then
    it can add/remove stuff into it before passing it on. You can think of
    it as pipes, where the ``query_dict`` is being passed to each IDatastore
    extension in the order they've been loaded allowing them to change the
    ``query_dict``. The ``datastore`` extension always comes first.

    The ``query_dict`` is on the form:
    {
        'where': []
    }

    As extensions can both add and remove those keys, it's not guaranteed
    that any of them will exist when you receive the ``query_dict``, so
    you're supposed to test the existence of any keys before modifying them.

    The ``where`` elements are on the form:
    (format_string, param1, param2, ...)

    The ``format_string`` isn't escaped for SQL Injection attacks, so
    everything coming from the user should be in the params list. With this
    format, you could do something like:
    ('"age" BETWEEN %s AND %s', age_between[0], age_between[1])

    This escapes the ``age_between[0]`` and ``age_between[1]`` making sure
    we're not vulnerable.

    After finishing this, you should return your modified ``query_dict``.

    :param context: the context
    :type context: dictionary
    :param data_dict: the parameters received from the user
    :type data_dict: dictionary
    :param fields_types: the current resource's fields as dict keys and
        their types as values
    :type fields_types: dictionary
    :param query_dict: the current query_dict, as changed by the IDatastore
        extensions that ran before yours
    :type query_dict: dictionary

    :returns: the query_dict with your modifications
    :rtype: dictionary
    '''
    return query_dict
[ 914, 34 ]
def METHOD_NAME(self):
[ 9, 2469 ]
def METHOD_NAME(plugins: list[dict[str, str]]) -> list[dict[str, str]]:
    plugins_dict: dict[str, dict[str, str]] = {}
    for plugin in plugins:
        # Plugins from later indexes override others
        plugin["name"] = plugin["name"].lower()
        plugins_dict[plugin["name"]] = plugin
    return sorted(plugins_dict.values(), key=lambda p: p["name"])
[ 3686, 1294 ]
def METHOD_NAME(self): self.file.METHOD_NAME()
[ 1462 ]
def METHOD_NAME(train_data, train_labels, predict_data, nClasses):
    # Create an algorithm object and call compute
    train_algo = d4p.bf_knn_classification_training(nClasses=nClasses, fptype="float")
    train_result = train_algo.METHOD_NAME(train_data, train_labels)

    # Create an algorithm object and call compute
    predict_algo = d4p.bf_knn_classification_prediction(nClasses=nClasses, fptype="float")
    predict_result = predict_algo.METHOD_NAME(predict_data, train_result.model)

    return predict_result
[ 226 ]
def METHOD_NAME(self):
    request = Mock(method="BADWOLF")
    view = Mock()
    obj = Mock()
    perm_obj = KolibriAuthPermissions()
    self.assertFalse(perm_obj.has_object_permission(request, view, obj))
[ 9, 1068, 377, 103 ]
def METHOD_NAME(self, *args, **kwargs):
    public_doc_records = public_doc_query()
    for doc in public_doc_records:
        pdf_name = create_name(doc)
        # We don't want to delete the original so we are not moving,
        # and we can't rename and copy at the same time
        try:
            temp_name = grab_doc(pdf_name)
            add_me = True
        except ClientError:
            add_me = False
            logger.error(f"DOWNLOAD FAILED: {pdf_name}")
        if add_me is True:
            url = add_to_public(
                pdf_name, doc["audit_year"], doc["dbkey"], temp_name
            )
            add_to_model(doc["id"], url)
[ 276 ]
def METHOD_NAME(bucket, file_name):
    """
    Exposes helper method without breaking existing bindings/dependencies
    """
    return storage_service_key_source_function(bucket, file_name)
[ 948, 549, 59 ]
def METHOD_NAME(argv, log=False):
    """Call nvcc with arguments assembled from argv.

    Args:
        argv: A list of strings, possibly the argv passed to main().
        log: True if logging is requested.

    Returns:
        The return value of calling os.system('nvcc ' + args)
    """
    src_files = [f for f in argv if re.search('\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
    if len(src_files) == 0:
        raise Error('No source files found for cuda compilation.')

    out_file = [f for f in argv if f.startswith('/Fo')]
    if len(out_file) != 1:
        raise Error('Please specify exactly one output file for cuda compilation.')
    out = ['-o', out_file[0][len('/Fo'):]]

    nvcc_compiler_options, argv = GetNvccOptions(argv)

    opt_option, argv = GetOptionValue(argv, 'O')
    opt = ['-g', '-G']
    if (len(opt_option) > 0 and opt_option[0] != 'd'):
        opt = ['-O2']

    include_options, argv = GetOptionValue(argv, 'I')
    includes = ["-I " + include for include in include_options]

    defines, argv = GetOptionValue(argv, 'D')
    defines = ['-D' + define for define in defines]

    undefines, argv = GetOptionValue(argv, 'U')
    undefines = ['-U' + define for define in undefines]

    # The rest of the unrecognized options should be passed to host compiler
    host_compiler_options = [option for option in argv if option not in (src_files + out_file)]

    m_options = ["-m64"]

    nvccopts = ['-D_FORCE_INLINES']
    for capability in supported_cuda_compute_capabilities:
        capability = capability.replace('.', '')
        nvccopts += [r'-gencode=arch=compute_%s,"code=sm_%s,compute_%s"' % (
            capability, capability, capability)]
    nvccopts += nvcc_compiler_options
    nvccopts += undefines
    nvccopts += defines
    nvccopts += m_options
    nvccopts += ['--compiler-options="' + " ".join(host_compiler_options) + '"']
    nvccopts += ['-x', 'cu'] + opt + includes + out + ['-c'] + src_files
    # If we don't specify --keep-dir, nvcc will generate intermediate files under TEMP.
    # Put them under NVCC_TEMP_DIR instead, then Bazel can ignore files under
    # NVCC_TEMP_DIR during dependency check.
    # http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-guiding-compiler-driver
    # Different actions are sharing NVCC_TEMP_DIR, so we cannot remove it if the
    # directory already exists.
    if os.path.isfile(NVCC_TEMP_DIR):
        os.remove(NVCC_TEMP_DIR)
    if not os.path.exists(NVCC_TEMP_DIR):
        os.makedirs(NVCC_TEMP_DIR)
    nvccopts += ['--keep', '--keep-dir', NVCC_TEMP_DIR]
    cmd = [NVCC_PATH] + nvccopts
    if log:
        Log(cmd)
    proc = subprocess.Popen(cmd,
                            stdout=sys.stdout,
                            stderr=sys.stderr,
                            env=os.environ.copy(),
                            shell=True)
    proc.wait()
    return proc.returncode
[ 4311, 15543 ]
def METHOD_NAME(fmt, *args):
    s = fmt.format(*args)
    return (s, len(s))
[ 6569, 275, 772 ]
def METHOD_NAME(list_name, msgid): return "{}/arch/msg/{}/{}".format(settings.MAILING_LIST_ARCHIVE_URL, list_name, hash_list_message_id(list_name, msgid))
[ 363, 277, 274 ]
def METHOD_NAME(self, model):
    data = model.db.get_unit_operation_parameters("co2_addition")
    model.fs.unit.load_parameters_from_database()
    assert model.fs.unit.energy_electric_flow_vol_inlet.fixed
    assert (
        model.fs.unit.energy_electric_flow_vol_inlet.value
        == data["energy_electric_flow_vol_inlet"]["value"]
    )
[ 9, 557, 386 ]
def METHOD_NAME(self, train_conf, vars_conf):
    new_opt_confs = []
    for param_group in self.param_groups:
        assert (
            param_group["contiguous_params"] != True
        ), "contiguous_params cannot be used in graph"
        optimizer_conf = train_conf.optimizer_conf.add()

        lr = (
            param_group["initial_lr"]
            if "initial_lr" in param_group
            else param_group["lr"]
        )
        l2 = param_group["weight_decay"]
        initial_accumulator_value = param_group["initial_accumulator_value"]
        lr_decay = param_group["lr_decay"]
        epsilon = param_group["eps"]

        optimizer_conf.base_learning_rate = lr
        self._generate_lr_scale_for_optim_conf(param_group, optimizer_conf)

        optimizer_conf.adagrad_conf.initial_accumulator_value = (
            initial_accumulator_value
        )
        optimizer_conf.adagrad_conf.lr_decay = lr_decay
        optimizer_conf.adagrad_conf.epsilon = epsilon

        self._generate_grad_clip_conf_for_optim_conf(param_group, optimizer_conf)

        for param in param_group.parameters:
            vars_conf[param].l2 = l2
            if param.requires_grad:
                optimizer_conf.variable_op_names.append(vars_conf[param].name)

        new_opt_confs.append(optimizer_conf)
    return new_opt_confs
[ 567, 2546, 43, 303 ]
def METHOD_NAME(self): """Return the precached index of the object. :rtype: int """ # Get the index of the object in its precache table METHOD_NAME = string_tables[self.precache_table][self._path] # Is the object precached? if METHOD_NAME != INVALID_STRING_INDEX: # Return the precache index return METHOD_NAME # If the object was not precached, raise an error raise PrecacheError( '"{0}" was not able to be precached due to the "{1}" table ' 'reaching its limit.'.format(self._path, self.precache_table))
[ 724 ]
def METHOD_NAME(self, method, device):
    """Test pershot save density matrix instruction"""
    backend = self.backend(method=method, device=device)

    # Stabilizer test circuit
    circ = QuantumCircuit(1)
    circ.x(0)
    circ.reset(0)
    circ.h(0)
    circ.sdg(0)

    # Target density matrix
    target = qi.DensityMatrix(circ)

    # Add save
    label = "state"
    circ.save_density_matrix(label=label, pershot=True)

    # Run
    shots = 10
    result = backend.run(transpile(circ, backend, optimization_level=0), shots=shots).result()
    self.assertTrue(result.success)
    simdata = result.data(0)
    self.assertIn(label, simdata)
    value = simdata[label]
    for state in value:
        self.assertEqual(qi.DensityMatrix(state), target)
[ 9, 73, 2915, 430, 14123 ]
def METHOD_NAME(self, path, mode):
    path = path.decode(self.encoding)
    _evil_name(path)
    return errno_call(self._context, os.METHOD_NAME, path, mode)
[ 8347 ]
def METHOD_NAME(self) -> str:
    """
    Resource Name.
    """
    return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(self): return bool(self.__flags & DEF_BOUND)
[ 137, 125 ]
def METHOD_NAME(cfg, in_channels): return RetinaNetModule(cfg, in_channels)
[ 56, 18193 ]
def METHOD_NAME(pip_version=None):
    # type: (Optional[PipVersionValue]) -> bool
    pip_ver = pip_version or PipVersion.DEFAULT
    return pip_ver.version < Version("23.2")
[ 1466, 3116, 1836 ]
def METHOD_NAME(self, nb, data=None):
    tab_num = nb.get_current_page()
    tab = nb.get_nth_page(tab_num)
    alloc = tab.get_allocation()
    x, y, w, h = (alloc.x, alloc.y, alloc.width, alloc.height)
    pixbuf = self.svg.get_pixbuf_sub(f'#layer{tab_num}').scale_simple(
        w - 10, h - 10, GdkPixbuf.InterpType.BILINEAR)
    im = self.builder.get_object(f'Image{tab_num}')
    im.set_from_pixbuf(pixbuf)
    for c in im.get_parent().get_children():
        if c.get_has_tooltip():
            m = re.findall(r'<!--(\d+),(\d+)-->', c.get_tooltip_markup())
            if len(m) > 0:
                x1 = int(m[0][0]); y1 = int(m[0][1])
                c.set_margin_left(max(0, w * x1 / 1500))
                c.set_margin_top(max(0, h * y1 / 1000))
[ 69, 10401 ]
def METHOD_NAME(d: datetime) -> datetime:
    # There are two types of datetime objects in Python: naive and aware.
    # Assume any dbt snapshot timestamp that is naive is meant to represent UTC.
    if d is None:
        return d
    elif is_aware(d):
        return d
    else:
        return d.replace(tzinfo=pytz.UTC)
[ 197, 24, 2894 ]
def METHOD_NAME(cql, namespaces, fes_version='1.0'):
    """transforms Common Query Language (CQL) query into OGC fes1 syntax"""

    filters = []
    tmp_list = []
    logical_op = None

    LOGGER.debug('CQL: %s', cql)

    if fes_version.startswith('1.0'):
        element_or = 'ogc:Or'
        element_and = 'ogc:And'
        element_filter = 'ogc:Filter'
        element_propertyname = 'ogc:PropertyName'
        element_literal = 'ogc:Literal'
    elif fes_version.startswith('2.0'):
        element_or = 'fes20:Or'
        element_and = 'fes20:And'
        element_filter = 'fes20:Filter'
        element_propertyname = 'fes20:ValueReference'
        element_literal = 'fes20:Literal'

    if ' or ' in cql:
        logical_op = etree.Element(util.nspath_eval(element_or, namespaces))
        tmp_list = cql.split(' or ')
    elif ' OR ' in cql:
        logical_op = etree.Element(util.nspath_eval(element_or, namespaces))
        tmp_list = cql.split(' OR ')
    elif ' and ' in cql:
        logical_op = etree.Element(util.nspath_eval(element_and, namespaces))
        tmp_list = cql.split(' and ')
    elif ' AND ' in cql:
        logical_op = etree.Element(util.nspath_eval(element_and, namespaces))
        tmp_list = cql.split(' AND ')

    if tmp_list:
        LOGGER.debug('Logical operator found (AND/OR)')
    else:
        tmp_list.append(cql)

    for t in tmp_list:
        filters.append(_parse_condition(t, fes_version))

    root = etree.Element(util.nspath_eval(element_filter, namespaces))

    if logical_op is not None:
        root.append(logical_op)

    for flt in filters:
        condition = etree.Element(util.nspath_eval(flt[0], namespaces))

        etree.SubElement(
            condition,
            util.nspath_eval(element_propertyname, namespaces)).text = flt[1]

        etree.SubElement(
            condition,
            util.nspath_eval(element_literal, namespaces)).text = flt[2]

        if logical_op is not None:
            logical_op.append(condition)
        else:
            root.append(condition)

    LOGGER.debug('Resulting OGC Filter: %s',
                 etree.tostring(root, pretty_print=1))

    return root
[ -1 ]
def METHOD_NAME(redis, key: str) -> Any:
    """
    Gets a JSON serialized value from the cache.
    """
    cached_value = redis.get(key)
    if cached_value:
        logger.debug(f"Redis Cache - hit {key}")
        try:
            deserialized = json.loads(cached_value)
            return deserialized
        except Exception as e:
            logger.warning(f"Unable to deserialize json cached response: {e}")
            # In the case we are unable to deserialize, delete the key so that
            # it may be properly re-cached.
            redis.delete(key)
            return None
    logger.debug(f"Redis Cache - miss {key}")
    return None
[ 19, 763, 175, 59 ]
def METHOD_NAME(self) -> None:
    self.store.METHOD_NAME()
    try:
        flask.current_app.config.METHOD_NAME()
    except RuntimeError:
        pass
[ 537 ]
def METHOD_NAME(self, description, uuids=None, report=None):
    """Check that the delta description is correct."""
    report = report if report is not None else self.report
    uuids = sorted(uuids or [REPORT_ID, NOTIFICATION_DESTINATION_ID])
    self.assertEqual({"uuids": uuids, "email": self.email, "description": description}, report["delta"])
[ 638, 1364 ]
def METHOD_NAME(mocker, cobbler_api):
    # Arrange
    mock_builtins_open = mocker.patch("builtins.open", mocker.mock_open())
    mock_system = System(cobbler_api)
    mock_system.name = "test_manager_regen_ethers_system"
    mock_system.interfaces = {"default": NetworkInterface(cobbler_api)}
    mock_system.interfaces["default"].dns_name = "host.example.org"
    mock_system.interfaces["default"].mac_address = "aa:bb:cc:dd:ee:ff"
    mock_system.interfaces["default"].ip_address = "192.168.1.2"
    mock_system.interfaces["default"].ipv6_address = "::1"
    dnsmasq.MANAGER = None
    test_manager = dnsmasq.get_manager(cobbler_api)
    test_manager.systems = [mock_system]

    # Act
    test_manager.regen_ethers()

    # Assert
    mock_builtins_open.assert_called_once_with("/etc/ethers", "w+", encoding="UTF-8")
    write_handle = mock_builtins_open()
    write_handle.write.assert_called_once_with("AA:BB:CC:DD:EE:FF\t192.168.1.2\n")
[ 9, 722, 9351, -1 ]
async def METHOD_NAME(self, key, user=None, is_iter=False):
    """ Process a dict lookup message """
    logging.debug("Looking up {} for {}".format(key, user))
    orig_key = key
    # Priv and shared keys are handled slightly differently
    key_type, key = key.decode("utf8").split("/", 1)
    try:
        result = await self.dict.get(
            key,
            ns=((user.decode("utf8") if user else self.user)
                if key_type == "priv" else None)
        )
        if type(result) is str:
            response = result.encode("utf8")
        elif type(result) is bytes:
            response = result
        else:
            response = json.dumps(result).encode("ascii")
        return await (self.reply(b"O", orig_key, response)
                      if is_iter else self.reply(b"O", response))
    except KeyError:
        return await self.reply(b"N")
[ 356, 1906 ]
def METHOD_NAME(self): """Check that all foreign keys are created correctly""" # filter questions on survey questions = list(StudentQuestionBase.objects.filter(survey=self.survey)) self.assertTrue(self.grading_q.pk in [q.pk for q in questions]) self.assertTrue(self.slider_q.pk in [q.pk for q in questions]) self.assertTrue(self.grading_ans.question, self.grading_q) self.assertTrue(self.slider_ans.question, self.slider_q) # filter workfields on survey workfields = list(WorkField.objects.filter(survey=self.survey)) self.assertTrue(self.wfield1 in workfields) self.assertTrue(self.wfield2 in workfields) self.assertFalse(self.wfield_no_survey in workfields) # filter workfield answers on student to make sure we get them all work_answers = list(StudentAnswerWorkField.objects.filter(student=self.student)) self.assertTrue(self.wfieldans1 in work_answers) self.assertTrue(self.wfieldans2 in work_answers) self.assertEqual(len(work_answers), 2)
[ 9, -1, 219 ]
def METHOD_NAME(self):
    self.args = [
        "command_dummy",
        "--host",
        TESTSERVER_URL + "/service?request=GetCapabilities",
    ]
    with open(SERVICE_EXCEPTION_FILE, "rb") as fp:
        capabilities_doc = fp.read()
        with mock_httpd(
            TESTSERVER_ADDRESS,
            [
                (
                    {
                        "path": "/service?request=GetCapabilities&version=1.1.1&service=WMS",
                        "method": "GET",
                    },
                    {"status": "200", "body": capabilities_doc},
                )
            ],
        ):
            with capture() as (out, err):
                with pytest.raises(SystemExit):
                    wms_capabilities_command(self.args)
            error_msg = err.getvalue().rsplit("-" * 80, 1)[1].strip()
            assert "Not a capabilities document" in error_msg
[ 9, 549, 442 ]
def METHOD_NAME(self):
    if LXML_PRESENT:
        self.assertEqual(registry.lookup('html'), LXMLTreeBuilder)
        self.assertEqual(registry.lookup('xml'), LXMLTreeBuilderForXML)
    else:
        self.assertEqual(registry.lookup('xml'), None)
        if HTML5LIB_PRESENT:
            self.assertEqual(registry.lookup('html'), HTML5TreeBuilder)
        else:
            self.assertEqual(registry.lookup('html'), HTMLParserTreeBuilder)
[ 9, 1906, 604, 7469, 44 ]
def METHOD_NAME(n_fp, order):
    """
    Prepare DOF permutation vector for each possible facet orientation.
    """
    from sfepy.base.base import dict_to_array

    if n_fp == 2:
        mtx = make_line_matrix(order)
        ori_map = ori_line_to_iter
        fo = order - 1
    elif n_fp == 3:
        mtx = make_triangle_matrix(order)
        ori_map = ori_triangle_to_iter
        fo = order - 2
    elif n_fp == 4:
        mtx = make_square_matrix(order)
        ori_map = {}
        for key, val in six.iteritems(_quad_ori_groups):
            ori_map[key] = ori_square_to_iter[val]
        fo = order - 1
    else:
        raise ValueError('unsupported number of facet points! (%d)' % n_fp)

    dof_perms = {}
    for key, itfun in six.iteritems(ori_map):
        dof_perms[key] = [mtx[ii] for ii in itfun(fo)]

    dof_perms = dict_to_array(dof_perms)
    return dof_perms
[ 19, 1890, 7212, 13501 ]
def METHOD_NAME(self) -> List[NamedUser]: ...
[ 9579 ]
def METHOD_NAME():
    model = SemanticSegmentation(2)
    model.eval()
    model.serve()
[ 9, 3124 ]
def METHOD_NAME(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict())
[ 24, 3 ]
def METHOD_NAME(self, max_norm, aggregate_norm_fn=None):
    """Clips gradient norm."""
    return utils.clip_grad_norm_(self.params, max_norm, aggregate_norm_fn)
[ 4226, 140, 387 ]
def METHOD_NAME(agent_args, technologies, stock):
    from muse.agents.agent import Agent
    from muse.agents.factories import create_agent

    agent_args["share"] = "agent_share_zero"
    agent = create_agent(
        agent_type="Retrofit",
        technologies=technologies,
        capacity=stock.capacity,
        search_rules="from_techs->compress",
        year=2010,
        **agent_args,
    )
    assert isinstance(agent, Agent)
    assert len(agent.assets.capacity) == 0
    assert "asset" in agent.assets.dims and len(agent.assets.asset) == 0
    assert "year" in agent.assets.dims or len(agent.assets.year) > 1
    assert "region" not in agent.assets.dims
    assert "commodity" not in agent.assets.dims
[ 9, 946, -1, 61, -1 ]
def METHOD_NAME(self, handler: ErrorHandler[object] | None) -> None: ...
[ 0, 168, 1519 ]
def METHOD_NAME(self): pass
[ 709, 710 ]
def METHOD_NAME(
    repo,
    settings,
    fs=None,
    prefix: Optional[Tuple[str, ...]] = None,
    hash_name: Optional[str] = None,
    **kwargs,
):
    from dvc.fs import get_cloud_fs

    if not settings:
        return None

    cls, config, fs_path = get_cloud_fs(repo.config, **settings)
    fs = fs or cls(**config)
    if prefix:
        fs_path = fs.path.join(fs_path, *prefix)
    if hash_name:
        config["hash_name"] = hash_name
    return get_odb(fs, fs_path, state=repo.state, **config)
[ 19, -1 ]
def METHOD_NAME(self): pass
[ 709, 710 ]
def METHOD_NAME(self, skip_run=False, executable=None):
    # start with the run command
    cmd = [] if skip_run else self.build_run_cmd(executable=executable)

    # add arguments and insert dummy key value separators which are replaced with "=" later
    for key, value in self.args:
        cmd.extend([key, self.arg_sep, value])

    cmd = " ".join(quote_cmd([c]) for c in cmd)
    cmd = cmd.replace(" " + self.arg_sep + " ", "=")

    return cmd
[ 56 ]
def METHOD_NAME(self): raise ValueError("this dispatcher is not writable")
[ 276, 77, 417 ]
def METHOD_NAME(self):
[ 19, 2456 ]
def METHOD_NAME(self): """Exporter EntryPoint to call.""" return self.get_record_value('entry_point')
[ 475, 1669 ]
def METHOD_NAME(scope):
    if scope == "list":
        return [None]
    elif scope in ["create", "import:backup"]:
        return [
            {
                "owner": {"id": random.randrange(400, 500)},
                "assignee": {"id": random.randrange(500, 600)},
                "organization": {"id": random.randrange(600, 700)},
                "user": {"num_resources": count},
            }
            for count in (0, 1, 3, 10)
        ]
    else:
        return [
            {
                "id": random.randrange(300, 400),
                "owner": {"id": random.randrange(400, 500)},
                "assignee": {"id": random.randrange(500, 600)},
                "organization": {"id": random.randrange(600, 700)},
            }
        ]
[ 1614 ]
def METHOD_NAME(input_event, relation, text_encoder, max_e1, max_r, force):
    abort = False

    e1_tokens, rel_tokens, _ = data.conceptnet_data.do_example(text_encoder, input_event, relation, None)

    if len(e1_tokens) > max_e1:
        if force:
            XMB = torch.zeros(1, len(e1_tokens) + max_r).long().to(settings.device)
        else:
            XMB = torch.zeros(1, max_e1 + max_r).long().to(settings.device)
            return {}, True
    else:
        XMB = torch.zeros(1, max_e1 + max_r).long().to(settings.device)

    XMB[:, : len(e1_tokens)] = torch.LongTensor(e1_tokens)
    XMB[:, max_e1 : max_e1 + len(rel_tokens)] = torch.LongTensor(rel_tokens)

    batch = {}
    batch["sequences"] = XMB
    batch["attention_mask"] = data.conceptnet_data.make_attention_mask(XMB)

    return batch, abort
[ 0, -1, 1461 ]
def METHOD_NAME(self): return self.maxsize > 0 and len(self.queue) == self.maxsize
[ 324 ]
def METHOD_NAME(self, item, identity_field=None):
    identity = {}

    from_ = item
    if isinstance(item, dict) and 'data' in item:
        from_ = item['data']['message'][identity_field]

    identity['username'] = from_.get('username', None)
    identity['email'] = None
    identity['name'] = from_.get('first_name', None)
    return identity
[ 19, 4449, 2989 ]
def METHOD_NAME(self):
    return self.client.format_url(
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise/{clusterName}/databases/{databaseName}",
        **self.url_parameters
    )
[ 274 ]
def METHOD_NAME(test_file):
    with Image.open(test_file) as im:
        assert im.tell() == 0

        # prior to first image raises an error, both blatant and borderline
        with pytest.raises(EOFError):
            im.seek(-1)
        with pytest.raises(EOFError):
            im.seek(-523)

        # after the final image raises an error,
        # both blatant and borderline
        with pytest.raises(EOFError):
            im.seek(2)
        with pytest.raises(EOFError):
            im.seek(523)

        # bad calls shouldn't change the frame
        assert im.tell() == 0

        # this one will work
        im.seek(1)
        assert im.tell() == 1

        # and this one, too
        im.seek(0)
        assert im.tell() == 0
[ 9, 336 ]
def METHOD_NAME(): """See base_runner.""" return True
[ 220, 1797 ]
def METHOD_NAME(configuration_policy_group_name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                vpn_server_configuration_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConfigurationPolicyGroupResult]:
    """
    Retrieves the details of a ConfigurationPolicyGroup.

    :param str configuration_policy_group_name: The name of the ConfigurationPolicyGroup being retrieved.
    :param str resource_group_name: The resource group name of the VpnServerConfiguration.
    :param str vpn_server_configuration_name: The name of the VpnServerConfiguration.
    """
    ...
[ 19, 830, 54, 846, 146 ]
def METHOD_NAME(wn, tn):
    if tn is None:
        # special case, i.e. the last tile.
        # Don't know why it is a special case: if the ALL-1 is received with a
        # tile, then it should always be one. If the tile is considered received
        # by the method below, then it should not be False. I think the problem
        # is that the sender does not know if it is the last one, so the bitmap
        # is received with the max_fcn bit on 1, but since there are fewer tiles
        # than the max_fcn, it does not look for that bit.
        dprint("last tile case")
        #self.all_tiles[-1]["sent"] = False
        return
    # normal case.
    counter = 0
    dprint('unset_sent_flag_do')
    for t in self.all_tiles:
        if t["w-num"] == wn:
            if t["t-num"] == self.max_fcn - tn:
                counter += 1
                dprint('counter = {}, t-num {}, tn {}'.format(counter, t["t-num"], tn))
                t["sent"] = False
            elif t["t-num"] == self.max_fcn:
                dprint("t-num {} == max_fcn {}".format(t["t-num"], self.max_fcn))
[ 4132, 2876, 584, 74 ]
def METHOD_NAME(
    self,
    painter: QtGui.QPainter,
    option: QtWidgets.QStyleOptionGraphicsItem,  # pylint: disable=unused-argument
    widget: Optional[QtWidgets.QWidget] = ...,
):  # pylint: disable=unused-argument
    self._paint_boundary(painter)
[ 5932 ]
def METHOD_NAME(self): """Configure the DUT (Router in our case) which we use for testing the EMU functionality prior to the test""" sys.stdout.flush() if not CTRexScenario.router_cfg['no_dut_config']: sys.stdout.write('Configuring DUT... ') start_time = time.time() CTRexScenario.router.load_clean_config() CTRexScenario.router.configure_basic_interfaces() sys.stdout.write('Done. (%ss)\n' % int(time.time() - start_time))
[ 200, 1290 ]
def METHOD_NAME(self, peer_id: ID) -> PublicKey:
    """
    :param peer_id: peer ID to get public key for
    :return: public key of the peer
    :raise PeerStoreError: if peer ID not found
    """
[ 9600 ]
def METHOD_NAME(self, flag):
[ 0, 4160 ]
def METHOD_NAME(self, get_ipython, clear_output):
    """ Context manager that monkeypatches get_ipython and clear_output """
    original_clear_output = widget_output.clear_output
    original_get_ipython = widget_output.get_ipython
    widget_output.get_ipython = get_ipython
    widget_output.clear_output = clear_output
    try:
        yield
    finally:
        widget_output.clear_output = original_clear_output
        widget_output.get_ipython = original_get_ipython
[ 4331, 4741 ]
def METHOD_NAME():
    with pytest.raises(ImportError):
        from ddtrace.constants import INVALID_CONSTANT  # noqa
[ 9, 532 ]
def METHOD_NAME(lnst_config):
    for preset_name in PRESETS:
        preset = lnst_config.get_option("colours", preset_name)

        if preset == None:
            continue

        fg, bg, bf = preset
        extended_re = "^extended\([0-9]+\)$"

        if fg == "default":
            fg = None
        elif not re.match(extended_re, fg) and fg not in list(COLOURS.keys()):
            raise Exception("Colour '%s' not supported" % fg)

        if bg == "default":
            bg = None
        elif not re.match(extended_re, bg) and bg not in list(COLOURS.keys()):
            raise Exception("Colour '%s' not supported" % bg)

        PRESETS[preset_name] = [fg, bg, bool_it(bf)]
[ 557, 8435, 280, 200 ]
def METHOD_NAME(self) -> LoggingMode:
    """
    Get the logger's mode.

    :return: The logger mode.
    """
    return self._mode
[ 854 ]
def METHOD_NAME(data): """ Calculates the CRC32C checksum of the provided data. Args: data: the bytes over which the checksum should be calculated. Returns: An int representing the CRC32C checksum of the provided bytes. """ return _crc32c(six.ensure_binary(data))
[ 17276 ]
def METHOD_NAME(self, model): pass # Not needed
[ 238, 44 ]
def METHOD_NAME(self):
    size = 100
    matrix = torch.randn(size, size, dtype=torch.float64)
    matrix = matrix.matmul(matrix.mT)
    matrix.div_(matrix.norm())
    matrix.add_(torch.eye(matrix.size(-1), dtype=torch.float64).mul_(1e-1))

    # set up vector rhs
    rhs = torch.randn(size, dtype=torch.float64)

    # basic solve
    solves = linear_cg(matrix.matmul, rhs=rhs, max_iter=size)

    # solve with init value
    init = torch.randn(size, dtype=torch.float64)
    solves_with_init = linear_cg(matrix.matmul, rhs=rhs, max_iter=size, initial_guess=init)

    # Check cg
    matrix_chol = torch.linalg.cholesky(matrix)
    actual = torch.cholesky_solve(rhs.unsqueeze(dim=1), matrix_chol).squeeze()
    self.assertTrue(torch.allclose(solves, actual, atol=1e-3, rtol=1e-4))
    self.assertTrue(torch.allclose(solves_with_init, actual, atol=1e-3, rtol=1e-4))

    # set up matrix rhs
    numcols = 50
    rhs = torch.randn(size, numcols, dtype=torch.float64)

    # basic solve
    solves = linear_cg(matrix.matmul, rhs=rhs, max_iter=size)

    # solve with init value
    init = torch.randn(size, numcols, dtype=torch.float64)
    solves_with_init = linear_cg(matrix.matmul, rhs=rhs, max_iter=size, initial_guess=init)

    # Check cg
    actual = torch.cholesky_solve(rhs, matrix_chol)
    self.assertTrue(torch.allclose(solves, actual, atol=1e-3, rtol=1e-4))
    self.assertTrue(torch.allclose(solves_with_init, actual, atol=1e-3, rtol=1e-4))
[ 9, 10452 ]
def METHOD_NAME():
    if Promise.unhandled_exceptions:
        for exctype, value, tb in Promise.unhandled_exceptions:
            if value:
                raise value.with_traceback(tb)  # traceback.print_exception(exctype, value, tb)
[ -1, 10853 ]
def METHOD_NAME(src_lines, var_name, lines):
    add_comments_header(lines)
    lines.append("const char *{0} =".format(var_name))
    for src_line in src_lines:
        lines.append('"' + cpp_escape(src_line) + "\\n\"")
    lines[len(lines) - 1] += ';'
    add_comments_footer(lines)
[ 238, 7728, 144, 1479 ]
def METHOD_NAME(self): return os.path.join(self.save_path, 'train')
[ 849, 157 ]
def METHOD_NAME():
    mc = MailChimp(mc_api=app.config["MAILCHIMP_KEY"])
    try:
        email = request.form.get("email")
        try:
            data = mc.lists.members.create_or_update(
                list_id=app.config["MAILCHIMP_LIST"],
                subscriber_hash=get_subscriber_hash(email),
                data={
                    "email_address": email,
                    "status": "subscribed",
                    "status_if_new": "pending",
                },
            )
            status = data.get("status")
            if status == "pending":
                flash(
                    "Thanks for subscribing! You will receive a confirmation email shortly."
                )
            elif status == "subscribed":
                flash("You were already subscribed! Thanks for checking back.")
            else:
                raise ValueError("Unexpected status %s" % status)
        except ValueError as e:
            # ugh, this library is awful
            app.logger.info(
                "ValueError from mailchimp3 %s, assuming bad email: %r", e, email
            )
            flash("Your email address was not accepted - please check and try again.")
        except MailChimpError as e:
            # Either the JSON, or a dictionary containing the response
            (data,) = e.args
            if data.get("status") != 400:
                raise
            title = data.get("title")
            if title == "Member In Compliance State":
                app.logger.info("Member in compliance state: %r", email)
                flash(
                    """You've already been unsubscribed from our list, so we can't add you again.
                    Please contact %s to update your settings."""
                    % app.config["TICKETS_EMAIL"][1]
                )
            elif title == "Invalid Resource":
                app.logger.warn(
                    "Invalid Resource from MailChimp, likely bad email or rate limited: %r",
                    email,
                )
                flash(
                    """Your email address was not accepted - please check and try again.
                    If you've signed up to other lists recently, please wait 48 hours."""
                )
            else:
                app.logger.warn("MailChimp returned %s: %s", title, data.get("detail"))
                flash("Sorry, an error occurred: %s." % (title or "unknown"))
    except Exception as e:
        app.logger.exception("Error subscribing: %r", e)
        flash("Sorry, an error occurred.")
    return redirect(url_for(".main"))
[ 57, 72 ]
def METHOD_NAME():
    vip = utils.config_get('vip')
    vip_iface = utils.config_get('vip_iface')
    vip_cidr = utils.config_get('vip_cidr')
    corosync_bindiface = utils.config_get('ha-bindiface')
    corosync_mcastport = utils.config_get('ha-mcastport')

    if None in [vip, vip_cidr, vip_iface]:
        utils.juju_log('WARNING',
                       'Insufficient VIP information to configure cluster')
        sys.exit(1)

    # Starting configuring resources.
    init_services = {'res_mysqld': 'mysql'}

    # If the 'ha' relation has been made *before* the 'ceph' relation,
    # it doesn't make sense to make it until after the 'ceph' relation is made
    if not utils.is_relation_made('ceph', 'auth'):
        utils.juju_log('INFO',
                       '*ceph* relation does not exist. '
                       'Not sending *ha* relation data yet')
        return
    else:
        utils.juju_log('INFO',
                       '*ceph* relation exists. Sending *ha* relation data')

        block_storage = 'ceph'

        if utils.config_get('prefer-ipv6'):
            res_mysql_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
            vip_cidr = '64'
        else:
            res_mysql_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        resources = {
            'res_mysql_rbd': 'ocf:ceph:rbd',
            'res_mysql_fs': 'ocf:heartbeat:Filesystem',
            'res_mysql_vip': res_mysql_vip,
            'res_mysqld': 'upstart:mysql'}

        rbd_name = utils.config_get('rbd-name')
        resource_params = {
            'res_mysql_rbd': 'params name="%s" pool="%s" user="%s" '
                             'secret="%s"' %
                             (rbd_name, POOL_NAME,
                              SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)),
            'res_mysql_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
                            'fstype="ext4" op start start-delay="10s"' %
                            (POOL_NAME, rbd_name, DATA_SRC_DST),
            'res_mysql_vip': 'params "%s"="%s" cidr_netmask="%s" nic="%s"' %
                             (vip_params, vip, vip_cidr, vip_iface),
            'res_mysqld': 'op start start-delay="5s" op monitor interval="5s"'}

        groups = {
            'grp_mysql': 'res_mysql_rbd res_mysql_fs res_mysql_vip res_mysqld'}

        for rel_id in utils.relation_ids('ha'):
            utils.relation_set(rid=rel_id,
                               block_storage=block_storage,
                               corosync_bindiface=corosync_bindiface,
                               corosync_mcastport=corosync_mcastport,
                               resources=resources,
                               resource_params=resource_params,
                               init_services=init_services,
                               groups=groups)
[ 3907, 2043, 2624 ]
def METHOD_NAME(
    query: str, offset: int, length: int, expected_result: Any, expected_num_rows_total: int
) -> None:
    # simulate index file
    index_file_location = "index.duckdb"
    con = duckdb.connect(index_file_location)
    con.execute("INSTALL 'httpfs';")
    con.execute("LOAD 'httpfs';")
    con.execute("INSTALL 'fts';")
    con.execute("LOAD 'fts';")
    con.sql("CREATE OR REPLACE SEQUENCE serial START 0 MINVALUE 0;")
    sample_df = pd.DataFrame(
        {
            "text": [
                "Grand Moff Tarkin and Lord Vader are interrupted in their discussion by the buzz of the comlink",
                "There goes another one.",
                "Vader turns round and round in circles as his ship spins into space.",
                "We count thirty Rebel ships.",
                "The wingman spots the pirateship coming at him and warns the Dark Lord",
            ]
        },
        dtype=pd.StringDtype(storage="python"),
    )
    create_command_sql = "CREATE OR REPLACE TABLE data AS SELECT nextval('serial') AS __hf_index_id, * FROM sample_df"
    con.sql(create_command_sql)
    con.execute(query="SELECT COUNT(*) FROM data;").fetchall()
    assert sample_df.size == con.execute(query="SELECT COUNT(*) FROM data;").fetchall()[0][0]
    con.sql("PRAGMA create_fts_index('data', '__hf_index_id', '*', overwrite=1);")
    con.close()

    # assert search results
    (num_rows_total, pa_table) = full_text_search(index_file_location, query, offset, length)
    assert num_rows_total is not None
    assert pa_table is not None
    assert num_rows_total == expected_num_rows_total

    fields = [pa.field("__hf_index_id", pa.int64()), pa.field("text", pa.string())]
    filtered_df = pd.DataFrame(expected_result)
    expected_table = pa.Table.from_pandas(filtered_df, schema=pa.schema(fields), preserve_index=False)
    assert pa_table == expected_table

    # ensure that database has not been modified
    con = duckdb.connect(index_file_location)
    assert sample_df.size == con.execute(query="SELECT COUNT(*) FROM data;").fetchall()[0][0]
    con.close()

    os.remove(index_file_location)
[ 9, 324, 526, 1070 ]
async def METHOD_NAME(
    db: AsyncSession = Depends(get_async_db),
    form_data: OAuth2PasswordRequestForm = Depends(),
[ 273, 43, 1089, 466 ]
def METHOD_NAME(cls):
    super().METHOD_NAME()
    cls.sth_related = SomethingRelated.objects.create(name='Rel1')
    cls.pol_1 = PolymorphicModelTest.objects.create(
        name='Pol1',
        sth_related=cls.sth_related
    )
    cls.pol_2 = PolymorphicModelTest.objects.create(
        name='Pol2',
        sth_related=cls.sth_related
    )
    cls.pol_3 = PolymorphicModelTest2.objects.create(
        name='Pol3',
        another_related=cls.sth_related,
    )
[ 0, 1, 2 ]
def METHOD_NAME(self): return []
[ 19, 2537, 24, 673, 2428 ]
def METHOD_NAME(self):
    factory = RequestFactory()
    bad_referers = (
        "http://otherdomain/bar/",
        "http://otherdomain/admin/forms/form/",
    )
    for referer in bad_referers:
        with self.subTest(referer=referer):
            request = factory.get(
                "/api/v1/foo", HTTP_REFERER=referer, SCRIPT_NAME="/of"
            )
            self.assertFalse(is_admin_request(request))
[ 9, 137, 2870, 377, 1168, 41, 4486 ]
def METHOD_NAME(self):
[ 22 ]
def METHOD_NAME(self):
    'od.keys() -> list of keys in od'
    return list(self)
[ 219 ]
def METHOD_NAME(minion_id, package_name, state_tree):
    module_contents = """
    def get_test_package_name():
        return "{}"
    """.format(
        package_name
    )
    top_file_contents = """
    base:
      {}:
        - install-package
    """.format(
        minion_id
    )
    install_package_sls_contents = """
    state-entry-contém-unicode:
      pkg.installed:
        - name: {{ salt.pkgnames.get_test_package_name() }}
    """
    with pytest.helpers.temp_file(
        "_modules/pkgnames.py",
        module_contents,
        state_tree,
    ), pytest.helpers.temp_file(
        "top.sls", top_file_contents, state_tree
    ), pytest.helpers.temp_file(
        "install-package.sls",
        install_package_sls_contents,
        state_tree,
    ):
        # Run the test
        yield
[ 131, 551, 151 ]
def METHOD_NAME(vineyard_client):
    df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]})
    object_id = vineyard_client.put(df)
    pd.testing.assert_frame_equal(df, vineyard_client.get(object_id))
[ 9, 2842, 1616 ]
def METHOD_NAME(self) -> str:
    """
    Resource type.
    """
    return pulumi.get(self, "type")
[ 44 ]
def METHOD_NAME(self): """ Test rectangular transfer of self.d_array1 to self.d_array2 """ # Reference o1 = self.offset1 o2 = self.offset2 T = self.transfer_shape logger.info("""Testing D->D rectangular copy with (N1_y, N1_x) = %s, (N2_y, N2_x) = %s: array2[%d:%d, %d:%d] = array1[%d:%d, %d:%d]""" % ( str(self.shape1), str(self.shape2), o2[0], o2[0] + T[0], o2[1], o2[1] + T[1], o1[0], o1[0] + T[0], o1[1], o1[1] + T[1] ) ) self.array2[o2[0]:o2[0] + T[0], o2[1]:o2[1] + T[1]] = self.array1[o1[0]:o1[0] + T[0], o1[1]:o1[1] + T[1]] kernel_args = ( self.d_array2.data, self.d_array1.data, np.int32(self.shape2[1]), np.int32(self.shape1[1]), np.int32(self.offset2[::-1]), np.int32(self.offset1[::-1]), np.int32(self.transfer_shape[::-1]) ) wg = None ndrange = self.transfer_shape[::-1] self.program.cpy2d(self.queue, ndrange, wg, *kernel_args) res = self.d_array2.get() self.compare(res, self.array2)
[ 9, -1 ]
def METHOD_NAME(self, view_size):
    if self.selection_valid():
        before_selection = view_size // 2
        self.view.start = max(0, self.selection - before_selection)
        self.view.end = self.view.start
        self.max_view_size = view_size
        self._expand_view()
    else:
        self.max_view_size = view_size
        self._reset_view()
[ 1128, 1179 ]
def METHOD_NAME(event):
    """
    Control-P in vi edit mode on readline is history next, unlike default
    prompt toolkit.

    If completer is open this still selects the previous completion.
    """
    event.current_buffer.auto_up()
[ 1511, 351, 894, 1511, 1323 ]
def METHOD_NAME(self, X, queue=None):
    y = super()._predict(X, _backend.linear_model.regression, queue)
    return y
[ 2103 ]
def METHOD_NAME(
    record_property, get_host_key, setup_splunk, setup_sc4s, event
):
    host = get_host_key

    dt = datetime.datetime.now(datetime.timezone.utc)
    iso, _, _, _, _, _, epoch = time_operations(dt)

    # Tune time functions
    iso = dt.isoformat()[0:23]
    epoch = epoch[:-3]

    mt = env.from_string(event + "\n")
    message = mt.render(mark="<29>1", iso=iso, host=host)

    sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])

    st = env.from_string(
        'search _time={{ epoch }} index=epav host="{{ host }}" sourcetype="kaspersky:es"'
    )
    search = st.render(epoch=epoch, host=host)

    result_count, _ = splunk_single(setup_splunk, search)

    record_property("host", host)
    record_property("resultCount", result_count)
    record_property("message", message)

    assert result_count == 1
[ 9, -1 ]
def METHOD_NAME(self) -> None:
    warnings.filterwarnings("default", category=DeprecationWarning)
    warnings.filterwarnings("ignore", category=DeprecationWarning, module="pyscf")
    warnings.filterwarnings(action="ignore", category=DeprecationWarning, module=".*drivers*")
    warnings.filterwarnings(
        action="default", category=DeprecationWarning, module=".*second_q.drivers.*"
    )
    warnings.filterwarnings(
        action="ignore", category=DeprecationWarning, module=".*transformers*"
    )
    warnings.filterwarnings(
        action="default",
        category=DeprecationWarning,
        module=".*second_q.transformers.*",
    )
    # ignore opflow/gradients/natural_gradient
    warnings.filterwarnings("ignore", category=RuntimeWarning, module="qiskit")
    self._started_at = time.time()
    self._class_location = __file__
[ 0, 1 ]
def METHOD_NAME(self, channel, on): pass
[ 307, 69, 3988 ]