Dataset schema (each record describes one Python function taken from a commit; columns listed with dtype and the min/max shown by the viewer):

column           dtype           min      max
n_words          int64           3        1.95k
n_ast_errors     int64           0        2
complexity       int64           1        151
nloc             int64           2        546
path             stringlengths   8        125
id               int64           280      339k
commit_message   stringlengths   3        18.1k
repo             stringlengths   3        28
ast_levels       int64           4        28
language         stringclasses   1 value
vocab_size       int64           3        677
file_name        stringlengths   5        67
code             stringlengths   101      24k
commit_id        stringlengths   40       40
ast_errors       stringlengths   0        2.76k
token_counts     int64           7        3.77k
url              stringlengths   31       61
n_whitespaces    int64           4        13.9k
random_cut       stringlengths   21       13.9k
n_identifiers    int64           1        157
n_ast_nodes      int64           10       3.6k
fun_name         stringlengths   3        72

The records below follow this column order, one field value per line; empty string fields (most often ast_errors) are simply omitted from a record.
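As a rough illustration only: the layout matches a Hugging Face dataset viewer page, so a record set with this schema could be loaded and filtered with the `datasets` library. The dataset identifier below is a placeholder (the real name is not given here), and the split name is an assumption; the field names come from the schema above.

from datasets import load_dataset

# Hypothetical identifier -- substitute the real dataset name.
ds = load_dataset("example-org/python-commit-functions", split="train")

# Keep small, low-complexity functions, using fields from the schema above.
simple = ds.filter(lambda row: row["complexity"] <= 3 and row["nloc"] <= 10)

# Peek at a few records: repository, file path, function name, and a code snippet.
for row in simple.select(range(3)):
    print(row["repo"], row["path"], row["fun_name"])
    print(row["code"][:200])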
78
0
2
12
tests/unit/test_pyimodulegraph.py
262,872
Fix typos (#6782) [skip ci]
pyinstaller
11
Python
47
test_pyimodulegraph.py
def test_graph_collects_script_dependencies(fresh_pyi_modgraph, tmpdir): mg = fresh_pyi_modgraph # self-test 1: uuid is not included in the graph by default src1 = gen_sourcefile(tmpdir, , test_id="1") node = mg.add_script(str(src1)) assert node is not None assert not mg.find_node("uuid") # self-test # Add script importing uuid src2 = gen_sourcefile(tmpdir, , test_id="2") mg.add_script(str(src2)) assert mg.find_node("uuid") # self-test # The actual test: uuid is (indirectly) linked to the first script names = [n.identifier for n in mg.iter_graph(start=node)] assert str(src2) in names assert "uuid" in names
1a7d704ffbabb433007e3ba04750c2f13ade48e5
103
https://github.com/pyinstaller/pyinstaller.git
121
def test_graph_collects_script_dependencies(fresh_pyi_modgraph, tmpdir): mg = fresh_pyi_modgraph # self-test 1: uuid is not included in the graph by default src1 = gen_sourcefile(tmpdir, , test_id="1") node = mg.add_script(str(src1)) assert node is not None assert not mg.find_node("uu
17
177
test_graph_collects_script_dependencies
55
0
1
8
pandas/tests/io/test_sql.py
162,995
ENH: to_sql returns rowcount (#45137)
pandas
12
Python
42
test_sql.py
def test_nan_string(self): # NaNs in string column df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]}) assert df.to_sql("test_nan", self.conn, index=False) == 3 # NaNs are coming back as None df.loc[2, "B"] = None # with read_table result = sql.read_sql_table("test_nan", self.conn) tm.assert_frame_equal(result, df) # with read_sql result = sql.read_sql_query("SELECT * FROM test_nan", self.conn) tm.assert_frame_equal(result, df)
3dfed3fcd552dcbf4daf7f78c82a87638f896512
100
https://github.com/pandas-dev/pandas.git
131
def test_nan_string(self): # NaNs in string c
16
165
test_nan_string
27
0
5
8
src/prefect/blocks/core.py
56,574
Adds capability filtering to block schema filter route
prefect
11
Python
22
core.py
def get_block_capabilities(cls): base_block_capabilities = [ getattr(base, "_block_schema_capabilities", []) or [] for base in cls.__bases__ ] return list( {c for capabilities in base_block_capabilities for c in capabilities} )
1e29ed45c704bb4b652e15134e95bcbdb77e73a5
42
https://github.com/PrefectHQ/prefect.git
87
def get_block_capabilities(cls): base_block_capabilities = [
9
64
get_block_capabilities
47
0
1
14
pandas/tests/arithmetic/test_period.py
172,325
API: dont do type inference on arithmetic results (#49714) * API: dont do type inference on arithmetic results * mypy fixup * use concat_compat * dont infer in TimedeltaArray * update addsub * avoid messing with box_expected
pandas
14
Python
31
test_period.py
def test_parr_add_sub_object_array(self): pi = period_range("2000-12-31", periods=3, freq="D") parr = pi.array other = np.array([Timedelta(days=1), pd.offsets.Day(2), 3]) with tm.assert_produces_warning(PerformanceWarning): result = parr + other expected = PeriodIndex( ["2001-01-01", "2001-01-03", "2001-01-05"], freq="D" )._data.astype(object) tm.assert_equal(result, expected) with tm.assert_produces_warning(PerformanceWarning): result = parr - other expected = PeriodIndex(["2000-12-30"] * 3, freq="D")._data.astype(object) tm.assert_equal(result, expected)
35a7f807ac9f02128333c1b5df0f03c897d13445
136
https://github.com/pandas-dev/pandas.git
149
def test_parr_add_sub_object_array(self): pi = period_range("2000-12-31", periods=3, freq="D") parr = pi.array other = np.array([Timedelta(days=1), pd.offsets.Day(2), 3]) with tm.assert_produces_warning(PerformanceWarning): result = parr + other expected = PeriodIndex( ["2001-01-01", "2001-01-03", "2001-01-05"], freq="D" )._data.astype(object) tm.assert_equal(result, expected) with tm.a
25
227
test_parr_add_sub_object_array
6
0
1
4
homeassistant/components/openuv/__init__.py
307,870
Allow multiple instances of OpenUV via the `homeassistant.update_entity` service (#76878) * Allow for multiple instances of the OpenUV integration * Docstring * Remove Repairs * Fix tests * Slightly faster OpenUV object lookup * Entity update service * Remove service descriptions * hassfest * Simplify strings * Don't add UI instructions to Repairs item * Add a throttle to entity update * Update homeassistant/components/openuv/__init__.py Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io> * Switch from Throttle to Debouncer(s) * Keep dispatcher for services * Reduce change surface area * Duplicate method * Add issue registry through helper * Update deprecation version * Use config entry selector * Remove device/service info * Remove commented out method * Correct entity IDs and better verbiage * Fix tests * Handle missing config entry ID in service calls * Remove unhelpful comment * Remove unused constants Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io> Co-authored-by: J. Nick Koston <nick@koston.org>
core
7
Python
6
__init__.py
def async_update_state(self) -> None: self.update_from_latest_data() self.async_write_ha_state()
ca5a9c945649f7de9cf58a09f41a33f5ba89b037
18
https://github.com/home-assistant/core.git
27
def async_update_state(self) -> None: self.update_from_latest_data() self.async_write_ha_state()
4
34
async_update_state
46
0
1
13
test/mitmproxy/addons/test_next_layer.py
253,148
[quic] full-stack test
mitmproxy
10
Python
27
test_next_layer.py
def test_next_layer_reverse_udp_mode(self): nl = NextLayer() ctx = MagicMock() ctx.client.alpn = None ctx.server.address = ("example.com", 443) ctx.client.transport_protocol = "udp" ctx.client.proxy_mode.scheme = "udp" ctx.layers = [layers.modes.ReverseProxy(ctx)] assert isinstance(nl._next_layer(ctx, b"", b""), layers.UDPLayer) ctx.layers = [layers.modes.ReverseProxy(ctx)] assert isinstance(nl._next_layer(ctx, dtls_client_hello_with_extensions, b""), layers.ClientTLSLayer) assert len(ctx.layers) == 2 assert isinstance(nl._next_layer(ctx, b"", b""), layers.UDPLayer)
6cf2a1202aaa24156b471e6f0a4c1fd58ad57602
145
https://github.com/mitmproxy/mitmproxy.git
129
def test_next_layer_reverse_udp_mode(self): nl = NextLayer() ctx = MagicMock() ctx.client.alpn = None ctx.server.address = ("example.com", 443) ctx.cli
22
228
test_next_layer_reverse_udp_mode
14
0
3
7
homeassistant/components/alert/__init__.py
288,947
Remove ToggleEntity inheritance from Alert (#80185)
core
9
Python
11
__init__.py
def state(self) -> str: if self._firing: if self._ack: return STATE_OFF return STATE_ON return STATE_IDLE
fc32071562de406c32e75410cd87920f82153856
24
https://github.com/home-assistant/core.git
72
def state(self) -> str: if self._firing: if s
8
41
state
67
0
1
40
src/streamlink/plugins/youtube.py
187,578
plugins: call schema.validate(value) instead of validate.validate(schema, value) in various plugins, so that a proper PluginError gets raised on failure instead of a ValidationError
streamlink
19
Python
45
youtube.py
def _schema_videodetails(cls, data): schema = validate.Schema( { "videoDetails": { "videoId": str, "author": str, "title": str, validate.optional("isLive"): validate.transform(bool), validate.optional("isLiveContent"): validate.transform(bool), validate.optional("isLiveDvrEnabled"): validate.transform(bool), validate.optional("isLowLatencyLiveStream"): validate.transform(bool), validate.optional("isPrivate"): validate.transform(bool), }, "microformat": validate.all( validate.any( validate.all( {"playerMicroformatRenderer": dict}, validate.get("playerMicroformatRenderer") ), validate.all( {"microformatDataRenderer": dict}, validate.get("microformatDataRenderer") ) ), { "category": str } ) }, validate.union_get( ("videoDetails", "videoId"), ("videoDetails", "author"), ("microformat", "category"), ("videoDetails", "title"), ("videoDetails", "isLive") ) ) videoDetails = schema.validate(data) log.trace(f"videoDetails = {videoDetails!r}") return videoDetails
4088bcdf6685ddca7f1400767266d0665a727455
208
https://github.com/streamlink/streamlink.git
731
def _schema_videodetails(cls, data): schema = validate.Schema( { "videoDetails": { "videoId": str, "author": str, "title": str, validate.optional("isLive"): validate.transform(bool), validate.optional("isLiveContent"): validate.transform(bool), validate.optional("isLi
18
357
_schema_videodetails
111
0
5
25
rllib/contrib/alpha_zero/core/mcts.py
133,883
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
19
Python
73
mcts.py
def compute_action(self, node): for _ in range(self.num_sims): leaf = node.select() if leaf.done: value = leaf.reward else: child_priors, value = self.model.compute_priors_and_value(leaf.obs) if self.add_dirichlet_noise: child_priors = (1 - self.dir_epsilon) * child_priors child_priors += self.dir_epsilon * np.random.dirichlet( [self.dir_noise] * child_priors.size ) leaf.expand(child_priors) leaf.backup(value) # Tree policy target (TPT) tree_policy = node.child_number_visits / node.number_visits tree_policy = tree_policy / np.max( tree_policy ) # to avoid overflows when computing softmax tree_policy = np.power(tree_policy, self.temperature) tree_policy = tree_policy / np.sum(tree_policy) if self.exploit: # if exploit then choose action that has the maximum # tree policy probability action = np.argmax(tree_policy) else: # otherwise sample an action according to tree policy probabilities action = np.random.choice(np.arange(node.action_space_size), p=tree_policy) return tree_policy, action, node.children[action]
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
188
https://github.com/ray-project/ray.git
431
def compute_action(self, node): for _ in range(self.num_sims): leaf = node.select() if leaf.done: value = leaf.reward else: child_priors, value = self.model.compute_priors_and_value(leaf.obs) if self.add_dirichlet_noise: child_priors = (1 - self.dir_epsilon) * child_priors child_priors += self.dir_epsilon * np.random.dirichlet( [self.dir_noise] * child_priors.size ) leaf.expand(child_priors) leaf.backup(value) # Tree policy target (TPT)
39
300
compute_action
44
0
1
17
tests/models/test_baseoperator.py
44,750
Straighten up MappedOperator hierarchy and typing (#21505)
airflow
16
Python
38
test_baseoperator.py
def test_expand_mapped_task_instance_skipped_on_zero(dag_maker, session): with dag_maker(session=session): task1 = BaseOperator(task_id="op1") xcomarg = XComArg(task1, "test_key") mapped = MockOperator.partial(task_id='task_2').map(arg2=xcomarg) dr = dag_maker.create_dagrun() session.add( TaskMap(dag_id=dr.dag_id, task_id=task1.task_id, run_id=dr.run_id, map_index=-1, length=0, keys=None) ) mapped.expand_mapped_task(upstream_ti=dr.get_task_instance(task1.task_id), session=session) indices = ( session.query(TaskInstance.map_index, TaskInstance.state) .filter_by(task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dr.run_id) .order_by(TaskInstance.map_index) .all() ) assert indices == [(-1, TaskInstanceState.SKIPPED)]
0cd3b11f3a5c406fbbd4433d8e44d326086db634
173
https://github.com/apache/airflow.git
123
def test_expand_mapped_task_instance_skipped_on_zero(dag_maker, session): with dag_maker(session=session): task1 = BaseOperator(task_id="op1") xcomarg = XComArg(task1, "test_key") mapped = MockOperator.partial(task_id='task_2').map(arg2=xcomarg) dr = dag_maker.create_dagrun() session.add( TaskMap(dag_id=dr.dag_id, task_id=task1.task_id, run_id=dr.run_id, map_index=-1, length=0, keys=None) ) mapped.expand_mapped_task(upstream_ti=dr.get_task_instance(task1.task_id), session=session) indices = ( session.query(TaskInstance.map_index, T
34
270
test_expand_mapped_task_instance_skipped_on_zero
208
1
11
53
tests/freqai/test_freqai_interface.py
151,853
Add 3ac test
freqtrade
16
Python
128
test_freqai_interface.py
def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, dbscan, float32): if is_arm() and model == 'CatboostRegressor': pytest.skip("CatBoost is not supported on ARM") if is_mac() and 'Reinforcement' in model: pytest.skip("Reinforcement learning module not available on intel based Mac OS") model_save_ext = 'joblib' freqai_conf.update({"freqaimodel": model}) freqai_conf.update({"timerange": "20180110-20180130"}) freqai_conf.update({"strategy": "freqai_test_strat"}) freqai_conf['freqai']['feature_parameters'].update({"principal_component_analysis": pca}) freqai_conf['freqai']['feature_parameters'].update({"use_DBSCAN_to_remove_outliers": dbscan}) freqai_conf.update({"reduce_df_footprint": float32}) if 'ReinforcementLearner' in model: model_save_ext = 'zip' freqai_conf = make_rl_config(freqai_conf) # test the RL guardrails freqai_conf['freqai']['feature_parameters'].update({"use_SVM_to_remove_outliers": True}) freqai_conf['freqai']['data_split_parameters'].update({'shuffle': True}) if 'test_3ac' in model or 'test_4ac' in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") if 'ReinforcementLearner' in model: model_save_ext = 'zip' freqai_conf = make_rl_config(freqai_conf) # test the RL guardrails freqai_conf['freqai']['feature_parameters'].update({"use_SVM_to_remove_outliers": True}) freqai_conf['freqai']['data_split_parameters'].update({'shuffle': True}) if 'test_3ac' in model or 'test_4ac' in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) strategy.freqai_info = freqai_conf.get("freqai", {}) freqai = strategy.freqai freqai.live = True freqai.dk = FreqaiDataKitchen(freqai_conf) freqai.dk.set_paths('ADA/BTC', 10000) timerange = TimeRange.parse_timerange("20180110-20180130") freqai.dd.load_all_pair_histories(timerange, freqai.dk) freqai.dd.pair_dict = MagicMock() data_load_timerange = TimeRange.parse_timerange("20180125-20180130") new_timerange = TimeRange.parse_timerange("20180127-20180130") freqai.dk.set_paths('ADA/BTC', None) freqai.train_timer("start", "ADA/BTC") freqai.extract_data_and_train_model( new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) freqai.train_timer("stop", "ADA/BTC") freqai.dd.save_metric_tracker_to_disk() freqai.dd.save_drawer_to_disk() assert Path(freqai.dk.full_path / "metric_tracker.json").is_file() assert Path(freqai.dk.full_path / "pair_dictionary.json").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.{model_save_ext}").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file() shutil.rmtree(Path(freqai.dk.full_path)) @pytest.mark.parametrize('model, strat', [ ('LightGBMRegressorMultiTarget', "freqai_test_multimodel_strat"), ('XGBoostRegressorMultiTarget', "freqai_test_multimodel_strat"), ('CatboostRegressorMultiTarget', "freqai_test_multimodel_strat"), ('LightGBMClassifierMultiTarget', "freqai_test_multimodel_classifier_strat"), ('CatboostClassifierMultiTarget', "freqai_test_multimodel_classifier_strat") ])
a8c9aa01fb3c11330618f26efa822bfe9394124e
@pytest.mark.parametrize('model, strat', [ ('LightGBMRegressorMultiTarget', "freqai_test_multimodel_strat"), ('XGBoostRegressorMultiTarget', "freqai_test_multimodel_strat"), ('CatboostRegressorMultiTarget', "freqai_test_multimodel_strat"), ('LightGBMClassifierMultiTarget', "freqai_test_multimodel_classifier_strat"), ('CatboostClassifierMultiTarget', "freqai_test_multimodel_classifier_strat") ])
531
https://github.com/freqtrade/freqtrade.git
458
def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, dbscan, float32): if is
52
1,049
test_extract_data_and_train_model_Standard
14
0
4
11
keras/engine/base_layer_v1.py
270,931
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
10
Python
12
base_layer_v1.py
def _maybe_cast_inputs(self, inputs): compute_dtype = self._compute_dtype if ( self._autocast and compute_dtype and tf.as_dtype(compute_dtype).is_floating ):
84afc5193d38057e2e2badf9c889ea87d80d8fbf
48
https://github.com/keras-team/keras.git
75
def _maybe_cast_inputs(self, inputs): compute_dtype = self._compute_dtype if ( self._autocast and compute_dtype and tf.as_dtype(compute_dtype).is_floating ):
9
51
_maybe_cast_inputs
27
0
2
9
certbot-apache/certbot_apache/_internal/override_centos.py
186,660
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <ferrand.ad@gmail.com>
certbot
12
Python
27
override_centos.py
def _try_restart_fedora(self) -> None: try: util.run_script(['systemctl', 'restart', 'httpd']) except errors.SubprocessError as err: raise errors.MisconfigurationError(str(err)) # Finish with actual config check to see if systemctl restart helped super().config_test()
7d9e9a49005de7961e84d2a7c608db57dbab3046
46
https://github.com/certbot/certbot.git
84
def _try_restart_fedora(self) -> None: try: util.run_script(['systemctl', 'restart', 'httpd']) except errors.SubprocessError as err: raise errors.MisconfigurationError(str(err)) # Finish with actual config check to see if systemctl restart helped
11
85
_try_restart_fedora
38
1
3
10
saleor/graphql/discount/tests/test_bulk_delete.py
28,593
fix bulk delete mutation for sales (#10553)
saleor
13
Python
32
test_bulk_delete.py
def test_delete_sales(staff_api_client, sale_list, permission_manage_discounts): variables = { "ids": [graphene.Node.to_global_id("Sale", sale.id) for sale in sale_list] } response = staff_api_client.post_graphql( SALE_BULK_DELETE_MUTATION, variables, permissions=[permission_manage_discounts] ) content = get_graphql_content(response) assert content["data"]["saleBulkDelete"]["count"] == 3 assert not Sale.objects.filter(id__in=[sale.id for sale in sale_list]).exists() @mock.patch("saleor.plugins.webhook.plugin.get_webhooks_for_event") @mock.patch("saleor.plugins.webhook.plugin.trigger_webhooks_async")
34511f97738853af9e7332b89787202ecaa5eb4a
@mock.patch("saleor.plugins.webhook.plugin.get_webhooks_for_event") @mock.patch("saleor.plugins.webhook.plugin.trigger_webhooks_async")
92
https://github.com/saleor/saleor.git
70
def test_delete_sales(staff_api_client, sale_list, permission_manage_discounts): variables = { "ids": [graphene.Nod
23
173
test_delete_sales
24
0
1
20
tests/providers/google/cloud/hooks/vertex_ai/test_endpoint_service.py
47,196
Fix new MyPy errors in main (#22884) Those MyPe errors are side effect of some new dependencies.
airflow
14
Python
22
test_endpoint_service.py
def test_delete_endpoint(self, mock_client) -> None: self.hook.delete_endpoint( project_id=TEST_PROJECT_ID, region=TEST_REGION, endpoint=TEST_ENDPOINT_NAME, ) mock_client.assert_called_once_with(TEST_REGION) mock_client.return_value.delete_endpoint.assert_called_once_with( request=dict( name=mock_client.return_value.endpoint_path.return_value, ), metadata=(), retry=DEFAULT, timeout=None, ) mock_client.return_value.endpoint_path.assert_called_once_with( TEST_PROJECT_ID, TEST_REGION, TEST_ENDPOINT_NAME, )
6933022e94acf139b2dea9a589bb8b25c62a5d20
87
https://github.com/apache/airflow.git
208
def test_delete_endpoint(self, mock_client) -> None: self.hook.delete_endpoint( project_id=TEST_PROJECT_ID, region=TEST_REGION, endpoint=TEST_ENDPOINT_NAME, ) mock_client.assert_called_once_with(TEST_REGION) mock_client.return_value.delete_endpoint.assert_called_once_with( request=dict( name=mock_client.return_value.endpoint_path.return_value, ), metadata=(),
21
127
test_delete_endpoint
69
0
5
13
src/datasets/arrow_dataset.py
106,161
Sharded save_to_disk + multiprocessing (#5268) * add num_shards, num_proc, storage_options to save_to_disk * minor * add tests * remove old s3fs integreation tests * style * style * Update DatasetDict.save_to_disk * test dataset dict * update dataset dict load_from_disk * minor * update test * update docs * backport to_reader to pyarrow < 8 * typo * support both max_shard_size and num_shards * style * docstrings * test _estimate_nbytes * add test for num_shards * style * mario's comment * add config.PBAR_REFRESH_TIME_INTERVAL * fix docstrings * use kwargs_iterable in iflatmap_unordered * fix tests
datasets
11
Python
53
arrow_dataset.py
def _estimate_nbytes(self) -> int: dataset_nbytes = self.data.nbytes # Find decodable columns, because if there are any, we need to # adjust the dataset size computation (needed for sharding) to account for possible external files decodable_columns = [k for k, v in self.features.items() if require_decoding(v, ignore_decode_attribute=True)] if decodable_columns: # Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples extra_nbytes = 0
232a43943e87dfedcc328a9a3d3b4d89ea5c6627
113
https://github.com/huggingface/datasets.git
125
def _estimate_nbytes(self) -> int: dataset_nbytes = self.data.nbytes # Find decodable columns, because if there are any, we need to # adjust the dataset size computation (needed for sharding) to account for possible external files decodable_columns = [k for k, v in self.features.items() if require_decoding(v, ignore_decode_attribute=True)] if decodable_columns: # Approximate the space needed to store the bytes from the exte
14
75
_estimate_nbytes
15
0
2
6
mmdet/engine/hooks/set_epoch_info_hook.py
245,538
[Fix] replace mmcv's function and modules imported with mmengine's (#8594) * use mmengine's load_state_dict and load_checkpoint * from mmengine import dump * from mmengine import FileClient dump list_from_file * remove redundant registry * update * update * update * replace _load_checkpoint with CheckpointLoad.load_checkpoint * changes according to mmcv #2216 * changes due to mmengine #447 * changes due mmengine #447 and mmcv #2217 * changes due mmengine #447 and mmcv #2217 * update * update * update
mmdetection
9
Python
12
set_epoch_info_hook.py
def before_train_epoch(self, runner): epoch = runner.epoch model = runner.model if is_model_wrapper(model): model = model.module model.set_epoch(epoch)
d0695e68654ca242be54e655491aef8c959ac345
34
https://github.com/open-mmlab/mmdetection.git
53
def before_train_epoch(self, runner): epoch = runner.epoch model = runner.model if is_mo
8
55
before_train_epoch
40
0
4
12
src/datasets/table.py
104,416
Update docs to new frontend/UI (#3690) * WIP: update docs to new UI * make style * Rm unused * inject_arrow_table_documentation __annotations__ * hasattr(arrow_table_method, "__annotations__") * Update task_template.rst * Codeblock PT-TF-SPLIT * Convert loading scripts * Convert docs to mdx * Fix mdx * Add <Tip> * Convert mdx tables * Fix codeblock * Rm unneded hashlinks * Update index.mdx * Redo dev change * Rm circle ci `build_doc` & `deploy_doc` * Rm unneeded files * Update docs reamde * Standardize to `Example::` * mdx logging levels doc * Table properties inject_arrow_table_documentation * ``` to ```py mdx * Add Tips mdx * important,None -> <Tip warning={true}> * More misc * Center imgs * Update instllation page * `setup.py` docs section * Rm imgs since they are in hf.co * Update docs/source/access.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update index mdx * Update docs/source/access.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * just `Dataset` obj * Addedversion just italics * Update ReadInstruction doc example syntax * Change docstring for `prepare_for_task` * Chore * Remove `code` syntax from headings * Rm `code` syntax from headings * Hashlink backward compatability * S3FileSystem doc * S3FileSystem doc updates * index.mdx updates * Add darkmode gifs * Index logo img css classes * Index mdx dataset logo img size * Docs for DownloadMode class * Doc DownloadMode table * format docstrings * style * Add doc builder scripts (#3790) * add doc builder scripts * fix docker image * Docs new UI actions no self hosted (#3793) * No self hosted * replace doc injection by actual docstrings * Docstring formatted Co-authored-by: Quentin Lhoest <lhoest.q@gmail.com> Co-authored-by: Mishig Davaadorj <dmishig@gmail.com> Co-authored-by: Lysandre Debut <lysandre.debut@reseau.eseo.fr> Co-authored-by: Mishig Davaadorj <dmishig@gmail.com> * Rm notebooks from docs actions since they dont exi * Update tsting branch * More docstring * Chore * bump up node version * bump up node * ``` -> ```py for audio_process.mdx * Update .github/workflows/build_documentation.yml Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> * Uodate dev doc build * remove run on PR * fix action * Fix gh doc workflow * forgot this change when merging master * Update build doc Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> Co-authored-by: Quentin Lhoest <lhoest.q@gmail.com> Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> Co-authored-by: Lysandre Debut <lysandre.debut@reseau.eseo.fr>
datasets
16
Python
29
table.py
def remove_column(self, i, *args, **kwargs): table = self.table.remove_column(i, *args, **kwargs) name = self.table.column_names[i] blocks = [] for tables in self.blocks: blocks.append( [ t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t for t in tables ] ) return ConcatenationTable(table, blocks)
e35be138148333078284b942ccc9ed7b1d826f97
96
https://github.com/huggingface/datasets.git
172
def remove_column(self, i, *args, **kwargs): table = self.table.remove_column(i, *args, **kwargs) name = self.table.column_names[i] blocks = [] for tables in self.blocks: blocks.append( [ t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t
14
145
remove_column
41
0
5
17
.venv/lib/python3.8/site-packages/pip/_vendor/tenacity/__init__.py
63,772
upd; format
transferlearning
16
Python
36
__init__.py
def __call__(self, fn, *args, **kwargs): self.begin(fn) retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs) while True: do = self.iter(retry_state=retry_state) if isinstance(do, DoAttempt): try: result = fn(*args, **kwargs) except BaseException: # noqa: B902 retry_state.set_exception(sys.exc_info()) else: retry_state.set_result(result) elif isinstance(do, DoSleep): retry_state.prepare_for_next_attempt() self.sleep(do) else: return do
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
116
https://github.com/jindongwang/transferlearning.git
253
def __call__(self, fn, *args, **kwargs): self.begin(fn) retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs) while True: do = self.iter(retry_state=retry_state) if isinstance(do, DoAttempt):
22
185
__call__
45
0
5
13
wagtail/search/backends/database/postgres/postgres.py
75,502
Reformat with black
wagtail
11
Python
40
postgres.py
def add_items(self, model, objs): search_fields = model.get_search_fields() if not search_fields: return indexers = [ObjectIndexer(obj, self.backend) for obj in objs] # TODO: Delete unindexed objects while dealing with proxy models. if indexers: content_type_pk = get_content_type_pk(model) update_method = ( self.add_items_upsert if self._enable_upsert else self.add_items_update_then_create ) update_method(content_type_pk, indexers)
d10f15e55806c6944827d801cd9c2d53f5da4186
67
https://github.com/wagtail/wagtail.git
179
def add_items(self, model, objs): search_fields = model.get_search_fields() if not search_fields: return indexers = [ObjectIndexer(obj, self.backend) for obj in objs] # TODO: Delete unindexed objects while dealing with prox
16
104
add_items
20
0
2
5
.venv/lib/python3.8/site-packages/pip/_internal/models/link.py
60,829
upd; format
transferlearning
9
Python
18
link.py
def egg_fragment(self): # type: () -> Optional[str] match = self._egg_fragment_re.search(self._url) if not match: return None return match.group(1) _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
30
https://github.com/jindongwang/transferlearning.git
61
def egg_fragment(self): # type: () -> Optional[str] match = self._egg_fragment_re.search(self._url
10
65
egg_fragment
72
0
1
27
tests/rest/client/test_relations.py
247,047
Add type hints to `tests/rest/client` (#12084)
synapse
13
Python
46
test_relations.py
def test_aggregation_redactions(self) -> None: channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") self.assertEqual(200, channel.code, channel.json_body) to_redact_event_id = channel.json_body["event_id"] channel = self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token ) self.assertEqual(200, channel.code, channel.json_body) # Now lets redact one of the 'a' reactions channel = self.make_request( "POST", "/_matrix/client/r0/rooms/%s/redact/%s" % (self.room, to_redact_event_id), access_token=self.user_token, content={}, ) self.assertEqual(200, channel.code, channel.json_body) channel = self.make_request( "GET", "/_matrix/client/unstable/rooms/%s/aggregations/%s" % (self.room, self.parent_id), access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) self.assertEqual( channel.json_body, {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, )
1901cb1d4a8b7d9af64493fbd336e9aa2561c20c
192
https://github.com/matrix-org/synapse.git
305
def test_aggregation_redactions(self) -> None: channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") self.assertEqual(200, channel.code, channel.json_body) to_redact_event_id = channel.json_body["event_id"] channel = self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token ) self.assertEqual(200, channel.code, channel.json_body) # Now lets redact one of the 'a' reactions channel = self.make_request( "POST", "/_matrix/client/r0/rooms/%s/redact/%s" % (self.room, to_redact_event_id), access_token=self.user_token, content={}, ) self.assertEqual(200, channel.code, channel.json_body) channel = self.make_request( "GET", "/_matrix/client/unstable/rooms/%s/aggregations/%s" % (self.room, self.parent_id), access_token=self.user_token, ) self.assertEqual(200, channel
17
308
test_aggregation_redactions
124
0
6
24
python/ray/data/tests/test_split.py
125,629
[Data][Split] stable version of split with hints (#26778) Why are these changes needed? Introduce a stable version of split with hints with a stable equalizing algorithm: use the greedy algorithm to generate the initial unbalanced splits. for each splits, first shave them so the number for rows are below the target_size based on how many rows needed for each split, do a one time split_at_index to the left over blocks. merge the shaved splits with the leftover splits. The guarantee of this algorithm is we at most need to split O(split) number of blocks.
ray
13
Python
75
test_split.py
def _test_equal_split_balanced(block_sizes, num_splits): blocks = [] metadata = [] total_rows = 0 for block_size in block_sizes: block = list(range(total_rows, total_rows + block_size)) blocks.append(ray.put(block)) metadata.append(BlockAccessor.for_block(block).get_metadata(None, None)) total_rows += block_size block_list = BlockList(blocks, metadata, owned_by_consumer=True) ds = Dataset( ExecutionPlan(block_list, DatasetStats.TODO(), run_by_consumer=True), 0, False, ) splits = ds.split(num_splits, equal=True) split_counts = [split.count() for split in splits] assert len(split_counts) == num_splits expected_block_size = total_rows // num_splits # Check that all splits are the expected size. assert all([count == expected_block_size for count in split_counts]) expected_total_rows = sum(split_counts) # Check that the expected number of rows were dropped. assert total_rows - expected_total_rows == total_rows % num_splits # Check that all rows are unique (content check). split_rows = [row for split in splits for row in split.take(total_rows)] assert len(set(split_rows)) == len(split_rows)
aaab4abad5f8549cfdadbebf7819c8f046bcdffb
198
https://github.com/ray-project/ray.git
229
def _test_equal_split_balanced(block_sizes, num_splits): blocks = [] metadata = [] total_rows = 0 for block_size in block_sizes: block = list(range(total_rows, total_rows + block_size)) blocks.append(ray.put(block)) metadata.append(BlockAcces
39
307
_test_equal_split_balanced
22
0
2
10
openbb_terminal/featflags_controller.py
285,655
New path for .env (#2508) * add log path * add test to check if log file is in correct dir * env path * black * mypy fix * linting * add make_paths and change references * terminal change * change constants to paths * change names * black * mypy * mypy * pylint else * add make paths * remove custom user dir name Co-authored-by: Chavithra <chavithra@gmail.com>
OpenBBTerminal
10
Python
21
featflags_controller.py
def call_tbhint(self, _): if obbff.TOOLBAR_HINT: console.print("Will take effect when running terminal next.") obbff.TOOLBAR_HINT = not obbff.TOOLBAR_HINT set_key( obbff.USER_ENV_FILE, "OPENBB_TOOLBAR_HINT", str(obbff.TOOLBAR_HINT), ) console.print("")
3d0190e35bae4092f52025377d8604b3a6a17bfa
49
https://github.com/OpenBB-finance/OpenBBTerminal.git
108
def call_tbhint(self, _): if obbff.TOOLBAR_HINT: console.print("Will take effect when running terminal next.") obbff.TOOLBAR_HINT = not obbff.TOOLBAR_HINT set_key( obbff.USER_ENV_FILE, "OPENBB_TOOLBAR_HINT", str(obbff.TOOLB
10
85
call_tbhint
21
0
5
23
rllib/agents/maml/maml_tf_policy.py
133,758
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
4
Python
18
maml_tf_policy.py
def feed_forward(self, obs, policy_vars, policy_config): # Hacky for now, reconstruct FC network with adapted weights # @mluo: TODO for any network
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
144
https://github.com/ray-project/ray.git
34
def feed_forward(self, obs, policy_vars, policy_config): # Hacky for now, reconstruct FC network with adapted weights # @mluo: TODO for any netwo
5
17
feed_forward
64
0
1
34
tests/state/test_v2.py
248,545
Type annotations for `test_v2` (#12985)
synapse
14
Python
44
test_v2.py
def test_ban_vs_pl(self) -> None: events = [ FakeEvent( id="PA", sender=ALICE, type=EventTypes.PowerLevels, state_key="", content={"users": {ALICE: 100, BOB: 50}}, ), FakeEvent( id="MA", sender=ALICE, type=EventTypes.Member, state_key=ALICE, content={"membership": Membership.JOIN}, ), FakeEvent( id="MB", sender=ALICE, type=EventTypes.Member, state_key=BOB, content={"membership": Membership.BAN}, ), FakeEvent( id="PB", sender=BOB, type=EventTypes.PowerLevels, state_key="", content={"users": {ALICE: 100, BOB: 50}}, ), ] edges = [["END", "MB", "MA", "PA", "START"], ["END", "PB", "PA"]] expected_state_ids = ["PA", "MA", "MB"] self.do_check(events, edges, expected_state_ids)
97053c94060ea31d3b9d41a129221ad4b2a76865
193
https://github.com/matrix-org/synapse.git
486
def test_ban_vs_pl(self) -> None: events = [ FakeEvent( id="PA", sender=ALICE, type=EventTypes.PowerLevels, state_key
20
303
test_ban_vs_pl
141
0
4
23
dask/tests/test_layers.py
156,908
Fix caching-related MaterializedLayer.cull performance regression (#9413) * allow MaterializedLayer to cache culled_deps * format * make test more thorough * fix import mistake * add link to issue in comment * improve test
dask
11
Python
93
test_layers.py
def test_dataframe_cull_key_dependencies_materialized(): # Test that caching of MaterializedLayer # dependencies during culling doesn't break # the result of ``get_all_dependencies`` datasets = pytest.importorskip("dask.datasets") dd = pytest.importorskip("dask.dataframe") ddf = datasets.timeseries(end="2000-01-15") # Build a custom layer to ensure # MaterializedLayer is used name = "custom_graph_test" name_0 = "custom_graph_test_0" dsk = {} for i in range(ddf.npartitions): dsk[(name_0, i)] = (lambda x: x, (ddf._name, i)) dsk[(name, i)] = (lambda x: x, (name_0, i)) dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf]) result = dd.core.new_dd_object(dsk, name, ddf._meta, ddf.divisions) graph = result.dask # HLG cull culled_keys = [k for k in result.__dask_keys__() if k != (name, 0)] culled_graph = graph.cull(culled_keys) # Check that culled_deps are cached # See: https://github.com/dask/dask/issues/9389 cached_deps = culled_graph.key_dependencies.copy() deps = culled_graph.get_all_dependencies() assert cached_deps == deps # Manual cull deps0 = graph.get_all_dependencies() deps0.pop((name, 0)) deps0.pop((name_0, 0)) deps0.pop((ddf._name, 0)) # Check that get_all_dependencies results match assert deps0 == deps
81f771e05f57cab2838534c319a9b81f6e5a00cd
227
https://github.com/dask/dask.git
244
def test_dataframe_cull_key_dependencies_materialized(): # Test that caching of MaterializedLayer # dependencies during culling doesn't break # the result of ``get_all_dependencies`` datasets = pytest.importorskip("dask.datasets") dd = pytest.importorskip("dask.dataframe") ddf = datas
38
365
test_dataframe_cull_key_dependencies_materialized
10
0
1
3
keras/layers/tensorflow_op_layer_test.py
274,189
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
10
Python
9
tensorflow_op_layer_test.py
def build(self, input_shape): self.bias = self.add_weight(name="bias", dtype="float32") self.layer = keras.layers.Dense(10)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
35
https://github.com/keras-team/keras.git
23
def build(self, input_shape): self.bias = self.add_weight(name="bia
11
58
build
34
0
3
7
django/template/context.py
206,214
Refs #33476 -- Reformatted code with Black.
django
12
Python
31
context.py
def update(self, other_dict): "Push other_dict to the stack of dictionaries in the Context" if not hasattr(other_dict, "__getitem__"): raise TypeError("other_dict must be a mapping (dictionary-like) object.") if isinstance(other_dict, BaseContext): other_dict = other_dict.dicts[1:].pop() return ContextDict(self, other_dict)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
50
https://github.com/django/django.git
83
def update(self, other_dict): "Push other_dict to the stack of dictionaries in the Context" if not hasattr(other_dict, "__getitem__"): raise TypeError("other_dict must be a mapping (dictionary-like) object.") if isinstance(other_dict, BaseContext):
10
84
update
104
0
6
29
ppg2mel/preprocess.py
161,217
Upgrade to new web service (#529) * Init new GUI * Remove unused codes * Reset layout * Add samples * Make framework to support multiple pages * Add vc mode * Add preprocessing mode * Add training mode * Remove text input in vc mode * Add entry for GUI and revise readme * Move requirement together * Add error raise when no model folder found * Add readme
MockingBird
14
Python
79
preprocess.py
def preprocess_dataset(datasets_root, dataset, out_dir, n_processes, ppg_encoder_model_fpath, speaker_encoder_model): # Glob wav files wav_file_list = sorted(Path(f"{datasets_root}/{dataset}").glob("**/*.wav")) print(f"Globbed {len(wav_file_list)} wav files.") out_dir.joinpath("bnf").mkdir(exist_ok=True, parents=True) out_dir.joinpath("f0").mkdir(exist_ok=True, parents=True) out_dir.joinpath("embed").mkdir(exist_ok=True, parents=True) ppg_model_local = load_model(ppg_encoder_model_fpath, "cpu") encoder_model_local = Encoder.load_model(speaker_encoder_model, "cpu") if n_processes is None: n_processes = cpu_count() device = torch.device("cuda" if torch.cuda.is_available() else "cpu") func = partial(preprocess_one, out_dir=out_dir, ppg_model_local=ppg_model_local, encoder_model_local=encoder_model_local, device=device) job = Pool(n_processes).imap(func, wav_file_list) list(tqdm(job, "Preprocessing", len(wav_file_list), unit="wav")) # finish processing and mark t_fid_file = out_dir.joinpath("train_fidlist.txt").open("w", encoding="utf-8") d_fid_file = out_dir.joinpath("dev_fidlist.txt").open("w", encoding="utf-8") e_fid_file = out_dir.joinpath("eval_fidlist.txt").open("w", encoding="utf-8") for file in sorted(out_dir.joinpath("f0").glob("*.npy")): id = os.path.basename(file).split(".f0.npy")[0] if id.endswith("01"): d_fid_file.write(id + "\n") elif id.endswith("09"): e_fid_file.write(id + "\n") else: t_fid_file.write(id + "\n") t_fid_file.close() d_fid_file.close() e_fid_file.close() return len(wav_file_list)
c5d03fb3cbf5105aa45dc131474260cf140b748b
334
https://github.com/babysor/MockingBird.git
241
def preprocess_dataset(datasets_root, dataset, out_dir, n_processes, ppg_encoder_model_fpath, speaker_encoder_model): # Glob wav files wav_file_list = sorted(Path(f"{datasets_root}/{dataset}").glob("**
49
593
preprocess_dataset
62
0
5
17
youtube_dl/extractor/neteasemusic.py
106,530
[netease] Impove error handling (#31303) * add warnings for users outside of China * skip empty song urls Co-authored-by: dirkf <fieldhouse@gmx.net>
youtube-dl
14
Python
49
neteasemusic.py
def _call_player_api(self, song_id, bitrate): url = 'https://interface3.music.163.com/eapi/song/enhance/player/url' data, headers = self.make_player_api_request_data_and_headers(song_id, bitrate) try: msg = 'empty result' result = self._download_json( url, song_id, data=data.encode('ascii'), headers=headers) if result: return result except ExtractorError as e: if type(e.cause) in (ValueError, TypeError): # JSON load failure raise except Exception as e: msg = error_to_compat_str(e) self.report_warning('%s API call (%s) failed: %s' % ( song_id, bitrate, msg)) return {}
d25cf62086443d86a633b8176b5c7e79f4cc569e
105
https://github.com/ytdl-org/youtube-dl.git
244
def _call_player_api(self, song_id, bitrate): url = 'https://interface3.music.163.com/eapi/son
21
171
_call_player_api
21
1
3
7
jax/_src/sharding.py
122,518
Make `MeshPspecSharding` an alias for `NamedSharding` (it was the other way around before this CL). PiperOrigin-RevId: 488473538
jax
9
Python
17
sharding.py
def _enable_cpp_named_sharding(): if xc._version >= 107: return xc.NamedSharding elif xc._version >= 95: return xc.MeshPspecSharding # type: ignore else: return None @pxla.use_cpp_class(_enable_cpp_named_sharding())
c42bad85ef427b2555464901f2edf2a19ad1564a
@pxla.use_cpp_class(_enable_cpp_named_sharding())
30
https://github.com/google/jax.git
32
def _enable_cpp_named_sharding(): if xc._version >= 107: return xc.NamedSharding elif xc._version >= 95: return xc.MeshPspecSharding # type: ignore else: return None @pxla.use_cpp_class(_enable_cpp_named_sharding())
7
66
_enable_cpp_named_sharding
21
0
1
10
src/urh/dev/PCAPNG.py
320,601
Adding Save As pcapng for ProtocolAnalyzer (#970)
urh
8
Python
19
PCAPNG.py
def _build_pcapng_idb(link_type) -> bytes: BLOCKTYPE = 0x00000001 BLOCKLENGTH = 20 SNAP_LEN = 0 return struct.pack(">IIHHII", BLOCKTYPE, BLOCKLENGTH, link_type, 0, SNAP_LEN, BLOCKLENGTH)
b7fd265179f1f646b51430d02ce3495920b7d2dd
35
https://github.com/jopohl/urh.git
147
def _build_pcapng_idb(link_type) -> bytes: BLOCKTYPE = 0x00
8
52
_build_pcapng_idb
9
0
1
5
tests/acceptance/test_account_settings.py
86,451
test: Use new settings routes in account settings test (#39587)
sentry
10
Python
9
test_account_settings.py
def test_account_subscriptions_settings(self): with self.feature("organizations:onboarding"): self.browser.get("/settings/account/subscriptions/") self.browser.wait_until_not('[data-test-id="loading-indicator"]') self.browser.snapshot("account subscriptions settings")
9399434e0a45da2c82209b38f1f214688e1ae4f3
37
https://github.com/getsentry/sentry.git
48
def test_account_subscriptions_settings(self): with self.feature("organizations:onboarding"): self.browser.get("/settings/account/s
7
71
test_account_subscriptions_settings
61
0
6
20
mindsdb/integrations/handlers/hana_handler/hana_handler.py
116,735
minor: use dummy table for sap hana conn check
mindsdb
13
Python
42
hana_handler.py
def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_connected is False try: connection = self.connect() with connection.cursor() as cur: cur.execute('SELECT 1 FROM SYS.DUMMY') response.success = True except dbapi.Error as e: log.error(f'Error connecting to SAP HANA {self.address}, {e}!') response.error_message = e if response.success is True and need_to_close: self.disconnect() if response.success is False and self.is_connected is True: self.is_connected = False return response
2970711efb4c713b604eb3bac840ef9d21f18273
103
https://github.com/mindsdb/mindsdb.git
209
def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_connected is False try: connection = self.connect
20
188
check_connection
62
0
4
32
tests/snuba/api/endpoints/test_organization_events_spans_performance.py
95,889
fix(suspect-spans): Use non aggregate variant of span examples query (#31295) The original span examples query uses an array join with an group by on the event id. This creates too many groupings, 1 per event id, which leads to slow query performance. This changes the query to do the same without using array join or any aggregates.
sentry
13
Python
38
test_organization_events_spans_performance.py
def suspect_span_examples_snuba_results(self, op, event): results = { "project.id": self.project.id, "id": event.event_id, } if op == "http.server": results.update( { "count_span_time": 1, "sum_span_time": 4.0, "max_span_time": 4.0, } ) elif op == "django.middleware": results.update( { "count_span_time": 2, "sum_span_time": 6.0, "max_span_time": 3.0, } ) elif op == "django.view": results.update( { "count_span_time": 3, "sum_span_time": 3.0, "max_span_time": 1.0, } ) else: assert False, f"Unexpected Op: {op}" return results
78fd2058f3db3ddeea4ac1d370656db6ad192a99
120
https://github.com/getsentry/sentry.git
470
def suspect_span_examples_snuba_results(self, op, event): results = { "project.id": self.project.id, "id": event.event_id,
9
190
suspect_span_examples_snuba_results
108
0
11
22
Add_two_Linked_List.py
22,294
refactor: clean code Signed-off-by: slowy07 <slowy.arfy@gmail.com>
Python
12
Python
40
Add_two_Linked_List.py
def Add_two_no(self, First, Second): prev = None temp = None carry = 0 while First is not None or Second is not None: first_data = 0 if First is None else First.data second_data = 0 if Second is None else Second.data Sum = carry + first_data + second_data carry = 1 if Sum >= 10 else 0 Sum = Sum if Sum < 10 else Sum % 10 temp = Node(Sum) if self.head is None: self.head = temp else: prev.next = temp prev = temp if First is not None: First = First.next if Second is not None: Second = Second.next if carry > 0: temp.next = Node(carry)
f0af0c43340763724f139fa68aa1e5a9ffe458b4
141
https://github.com/geekcomputers/Python.git
334
def Add_two_no(self, First, Second): prev = None temp = None carry = 0 while First is not None or Second is not None: first_data = 0 if First is None else First.data second_data = 0 if Second is None
14
217
Add_two_no
106
0
2
6
haystack/modeling/data_handler/samples.py
256,211
Apply black formatting (#2115) * Testing black on ui/ * Applying black on docstores * Add latest docstring and tutorial changes * Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too * Remove comments * Relax constraints on pydoc-markdown * Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade * Fix a couple of bugs * Add a type: ignore that was missing somehow * Give path to black * Apply Black * Apply Black * Relocate a couple of type: ignore * Update documentation * Make Linux CI run after applying Black * Triggering Black * Apply Black * Remove dependency, does not work well * Remove manually double trailing commas * Update documentation Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
haystack
13
Python
77
samples.py
def offset_to_token_idx_vecorized(token_offsets, ch_idx): # case ch_idx is at end of tokens if ch_idx >= np.max(token_offsets): # TODO check "+ 1" (it is needed for making end indices compliant with old offset_to_token_idx() function) # check whether end token is incluse or exclusive idx = np.argmax(token_offsets) + 1 # looking for the first occurence of token_offsets larger than ch_idx and taking one position to the left. # This is needed to overcome n special_tokens at start of sequence # and failsafe matching (the character start might not always coincide with a token offset, e.g. when starting at whitespace) else: idx = np.argmax(token_offsets > ch_idx) - 1 return idx
a59bca366174d9c692fa19750c24d65f47660ef7
44
https://github.com/deepset-ai/haystack.git
158
def offset_to_token_idx_vecorized(token_offsets, ch_idx): # case ch_idx is at end of tokens if ch_idx >= np.max(token_offsets): # TODO check "+ 1" (it is needed for making end indices compliant with old offset_to_token_idx() function) # check whether end token is incluse or exclusive idx = np.argmax(token_offsets) + 1 # looking for the first occurence of token_offsets larger than ch_idx and taking one position to the left. # This is needed to overcome n special_tokens at start of sequence # and failsafe matching (the character start might
7
79
offset_to_token_idx_vecorized
37
1
1
6
python/ray/tests/ludwig/test_ludwig.py
131,138
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
9
Python
32
test_ludwig.py
def run_api_experiment(config, data_parquet): # Sanity check that we get 4 slots over 1 host kwargs = get_horovod_kwargs() assert kwargs.get("num_hosts") == 1 assert kwargs.get("num_slots") == 2 # Train on Parquet dask_backend = RayBackend() train_with_backend(dask_backend, config, dataset=data_parquet, evaluate=False) @spawn
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
@spawn
49
https://github.com/ray-project/ray.git
56
def run_api_experiment(config, data_parquet): # Sanity check that we get 4 slots over 1 host kwargs = get_horovod_kwargs() assert kwargs.get("num_hosts") == 1 assert kwargs.get("num_slots") == 2
12
86
run_api_experiment
250
1
23
39
cps/admin.py
173,518
Make drive letters available in file picker
calibre-web
18
Python
65
admin.py
def list_restriction(res_type, user_id): if res_type == 0: # Tags as template restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)} for i, x in enumerate(config.list_denied_tags()) if x != ''] allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)} for i, x in enumerate(config.list_allowed_tags()) if x != ''] json_dumps = restrict + allow elif res_type == 1: # CustomC as template restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)} for i, x in enumerate(config.list_denied_column_values()) if x != ''] allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)} for i, x in enumerate(config.list_allowed_column_values()) if x != ''] json_dumps = restrict + allow elif res_type == 2: # Tags per user if isinstance(user_id, int): usr = ub.session.query(ub.User).filter(ub.User.id == user_id).first() else: usr = current_user restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)} for i, x in enumerate(usr.list_denied_tags()) if x != ''] allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)} for i, x in enumerate(usr.list_allowed_tags()) if x != ''] json_dumps = restrict + allow elif res_type == 3: # CustomC per user if isinstance(user_id, int): usr = ub.session.query(ub.User).filter(ub.User.id == user_id).first() else: usr = current_user restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)} for i, x in enumerate(usr.list_denied_column_values()) if x != ''] allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)} for i, x in enumerate(usr.list_allowed_column_values()) if x != ''] json_dumps = restrict + allow else: json_dumps = "" js = json.dumps(json_dumps) response = make_response(js) response.headers["Content-Type"] = "application/json; charset=utf-8" return response @admi.route("/ajax/fullsync", methods=["POST"]) @login_required
7eef44f73ccd19762ae3356d6c0ac70228ff3302
@admi.route("/ajax/fullsync", methods=["POST"]) @login_required
492
https://github.com/janeczku/calibre-web.git
581
def list_restriction(res_type, user_id): if res_type == 0: # Tags as template restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)} for i, x in enumerate(config.list_denied_tags()) if x != ''] allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)} for i, x in enumerate(config.list_allowed_tags()) if x != ''] json_dumps = restrict + allow elif res_type == 1: # CustomC as template restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)} for i, x in enumerate(config.list_denied_column_values()) if x != ''] allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)} for i, x in enumerate(config.list_allowed_column_values()) if x != ''] json_dumps = restrict + allow elif res_type == 2: # Tags per user if isinstance(user_id, int): usr = ub.session.query(ub.User).filter(ub.User.id == user_id).first() else: usr = current_user restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)} for i, x in enumerate(usr.list_denied_tags()) if x != ''] allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)} for i, x in enumerate(usr.list_allowed_tags()) if x != ''] json_dumps = restrict + allow elif res_type == 3: # CustomC per user if isinstance(user_id, int): usr = ub.session.query(ub.User).filter(ub.User.id == user_id).first() else: usr = current_user restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)} for i, x in enumerate(usr.list_denied_column_values()) if x != ''] allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)} for i, x in enumerate(usr.list_allowed_column_values()) if x != ''] json_dumps = restrict + allow else: json_dumps = "" js = json.dumps(json_dumps) response = make_response(js) response.headers["Content-Type"] = "application/json; charset=utf-8" return response @admi.route("/ajax/fullsync", methods=["POST"]) @login_required
37
887
list_restriction
27
0
4
8
python/ray/experimental/tf_utils.py
130,761
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
14
Python
18
tf_utils.py
def get_flat(self): # Eager mode. if not self.sess: return np.concatenate( [v.numpy().flatten() for v in self.variables.values()] ) # Graph mode. return np.concatenate( [v.eval(session=self.sess).flatten() for v in self.variables.values()] )
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
71
https://github.com/ray-project/ray.git
117
def get_flat(self):
12
118
get_flat
18
0
1
14
tests/components/android_ip_webcam/conftest.py
303,635
Add config flow to `android_ip_webcam` (#76222) Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
core
11
Python
13
conftest.py
def aioclient_mock_fixture(aioclient_mock) -> None: aioclient_mock.get( "http://1.1.1.1:8080/status.json?show_avail=1", text=load_fixture("android_ip_webcam/status_data.json"), status=HTTPStatus.OK, headers={"Content-Type": CONTENT_TYPE_JSON}, ) aioclient_mock.get( "http://1.1.1.1:8080/sensors.json", text=load_fixture("android_ip_webcam/sensor_data.json"), status=HTTPStatus.OK, headers={"Content-Type": CONTENT_TYPE_JSON}, )
f90d007e73b52cd06b2a450b2f9a215b4b0b384d
64
https://github.com/home-assistant/core.git
89
def aioclient_mock_fixture(aioclient_mock) -> None: aioclient_mock.get( "http://1.1.1.1:8080/status.json?show_avail=1", text=load_fixture("android_ip_webcam/status_data.json"), status=HTTPStatus.OK, headers={"Content-Type": CONTENT_TYPE_JSON}, ) aioclient_mock.get( "http://1.1.1.1:8080/sensors.json", text=load_fixture("android_ip_webcam/sensor_data.j
10
108
aioclient_mock_fixture
4
0
1
2
sympy/physics/mechanics/joint.py
200,201
Implement WeldJoint
sympy
8
Python
4
joint.py
def _set_angular_velocity(self): self.child_interframe.set_ang_vel(self.parent_interframe, 0)
0ea6acbf9547be893df8b1df918b712b6ad9ba21
17
https://github.com/sympy/sympy.git
10
def _set_angular_velocity(self): self.child_interframe.set_
5
27
_set_angular_velocity
44
0
1
12
test/test_prototype_transforms.py
193,371
Rename features.SegmentationMask to features.Mask (#6579) * rename features.SegmentationMask -> features.Mask * rename kernels *_segmentation_mask -> *_mask and cleanup input name * cleanup * rename module _segmentation_mask.py -> _mask.py * fix test
vision
11
Python
35
test_prototype_transforms.py
def test__extract_image_targets_assertion(self, mocker): transform = transforms.SimpleCopyPaste() flat_sample = [ # images, batch size = 2 self.create_fake_image(mocker, features.Image), # labels, bboxes, masks mocker.MagicMock(spec=features.Label), mocker.MagicMock(spec=features.BoundingBox), mocker.MagicMock(spec=features.Mask), # labels, bboxes, masks mocker.MagicMock(spec=features.BoundingBox), mocker.MagicMock(spec=features.Mask), ] with pytest.raises(TypeError, match="requires input sample to contain equal sized list of Images"): transform._extract_image_targets(flat_sample)
1ea73f5832f6b7ccf7c74dacb38a63b7ea2dd720
102
https://github.com/pytorch/vision.git
181
def test__extract_image_targets_assertion(self, mocker): transform = transforms.SimpleCopyPaste() flat_sample = [ # images, batch size = 2 self.create_fake_image(mocker, features.Image), # labels, bboxes, masks mocker.MagicMock(spec=features.Label), mocker.MagicMock(spec=features.BoundingBox), mocker.MagicMock(spec=features.Mask), # labels, bboxes, masks mocker.MagicMock(spec=features.BoundingBox), mocker.MagicMock(spec=features.Mas
20
160
test__extract_image_targets_assertion
7
0
1
2
qutebrowser/browser/webkit/webkittab.py
321,204
Run scripts/dev/rewrite_enums.py
qutebrowser
9
Python
7
webkittab.py
def down(self, count=1): self._key_press(Qt.Key.Key_Down, count, 'scrollBarMaximum', Qt.Orientation.Vertical)
0877fb0d78635692e481c8bde224fac5ad0dd430
29
https://github.com/qutebrowser/qutebrowser.git
13
def down(self, count=1): self._key_press(Qt.Key.Key_Down, count, 'scrollBarMaximum', Qt.Orient
9
44
down
523
0
35
76
sympy/combinatorics/tensor_can.py
198,366
Cleanup loops and ranges
sympy
17
Python
243
tensor_can.py
def canonicalize(g, dummies, msym, *v): from sympy.combinatorics.testutil import canonicalize_naive if not isinstance(msym, list): if msym not in (0, 1, None): raise ValueError('msym must be 0, 1 or None') num_types = 1 else: num_types = len(msym) if not all(msymx in (0, 1, None) for msymx in msym): raise ValueError('msym entries must be 0, 1 or None') if len(dummies) != num_types: raise ValueError( 'dummies and msym must have the same number of elements') size = g.size num_tensors = 0 v1 = [] for base_i, gens_i, n_i, sym_i in v: # check that the BSGS is minimal; # this property is used in double_coset_can_rep; # if it is not minimal use canonicalize_naive if not _is_minimal_bsgs(base_i, gens_i): mbsgs = get_minimal_bsgs(base_i, gens_i) if not mbsgs: can = canonicalize_naive(g, dummies, msym, *v) return can base_i, gens_i = mbsgs v1.append((base_i, gens_i, [[]] * n_i, sym_i)) num_tensors += n_i if num_types == 1 and not isinstance(msym, list): dummies = [dummies] msym = [msym] flat_dummies = [] for dumx in dummies: flat_dummies.extend(dumx) if flat_dummies and flat_dummies != list(range(flat_dummies[0], flat_dummies[-1] + 1)): raise ValueError('dummies is not valid') # slot symmetry of the tensor size1, sbase, sgens = gens_products(*v1) if size != size1: raise ValueError( 'g has size %d, generators have size %d' % (size, size1)) free = [i for i in range(size - 2) if i not in flat_dummies] num_free = len(free) # g1 minimal tensor under slot symmetry g1 = canonical_free(sbase, sgens, g, num_free) if not flat_dummies: return g1 # save the sign of g1 sign = 0 if g1[-1] == size - 1 else 1 # the free indices are kept fixed. # Determine free_i, the list of slots of tensors which are fixed # since they are occupied by free indices, which are fixed. start = 0 for i, (base_i, gens_i, n_i, sym_i) in enumerate(v): free_i = [] len_tens = gens_i[0].size - 2 # for each component tensor get a list od fixed islots for j in range(n_i): # get the elements corresponding to the component tensor h = g1[start:(start + len_tens)] fr = [] # get the positions of the fixed elements in h for k in free: if k in h: fr.append(h.index(k)) free_i.append(fr) start += len_tens v1[i] = (base_i, gens_i, free_i, sym_i) # BSGS of the tensor with fixed free indices # if tensor_gens fails in gens_product, use canonicalize_naive size, sbase, sgens = gens_products(*v1) # reduce the permutations getting rid of the free indices pos_free = [g1.index(x) for x in range(num_free)] size_red = size - num_free g1_red = [x - num_free for x in g1 if x in flat_dummies] if sign: g1_red.extend([size_red - 1, size_red - 2]) else: g1_red.extend([size_red - 2, size_red - 1]) map_slots = _get_map_slots(size, pos_free) sbase_red = [map_slots[i] for i in sbase if i not in pos_free] sgens_red = [_af_new([map_slots[i] for i in y._array_form if i not in pos_free]) for y in sgens] dummies_red = [[x - num_free for x in y] for y in dummies] transv_red = get_transversals(sbase_red, sgens_red) g1_red = _af_new(g1_red) g2 = double_coset_can_rep( dummies_red, msym, sbase_red, sgens_red, transv_red, g1_red) if g2 == 0: return 0 # lift to the case with the free indices g3 = _lift_sgens(size, pos_free, free, g2) return g3
7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c
638
https://github.com/sympy/sympy.git
1,079
def canonicalize(g, dummies, msym, *v): from sympy.combinatorics.testutil import canonicalize_naive if not isinstance(msym, list): if msym not in (0, 1, None): raise ValueError('msym must be 0, 1 or None') num_types = 1 else: num_types = len(msym) if not all(msymx in (0, 1, None) for msymx in msym): raise ValueError('msym entries must be 0, 1 or None') if len(dummies) != num_types: raise ValueError( 'dummies and msym must have the same number of elements') size = g.size num_tensors = 0 v1 = [] for base_i, gens_i, n_i, sym_i in v: # check that the BSGS is minimal; # this property is used in double_coset_can_rep; # if it is not minimal use canonicalize_naive if not _is_minimal_bsgs(base_i, gens_i): mbsgs = get_minimal_bsgs(base_i, gens_i) if not mbsgs: can = canonicalize_naive(g, dummies, msym, *v) return can base_i, gens_i = mbsgs v1.append((base_i, gens_i, [[]] * n_i, sym_i)) num_tensors += n_i if num_types == 1 and not isinstance(msym, list): dummies = [dummies] msym = [msym] flat_dummies = [] for dumx in dummies
69
980
canonicalize
43
0
1
14
awx/main/tests/unit/test_tasks.py
80,402
Refactored tasks.py to a package --- Added 3 new sub-package : awx.main.tasks.system , awx.main.tasks.jobs , awx.main.tasks.receptor --- Modified the functional tests and unit tests accordingly
awx
15
Python
39
test_tasks.py
def test_custom_environment_injectors_with_reserved_env_var(self, private_data_dir, job): task = tasks.jobs.RunJob() task.instance = job some_cloud = CredentialType( kind='cloud', name='SomeCloud', managed=False, inputs={'fields': [{'id': 'api_token', 'label': 'API Token', 'type': 'string'}]}, injectors={'env': {'JOB_ID': 'reserved'}}, ) credential = Credential(pk=1, credential_type=some_cloud, inputs={'api_token': 'ABC123'}) job.credentials.add(credential) env = task.build_env(job, private_data_dir) assert env['JOB_ID'] == str(job.pk)
a4a3ba65d736045733cb49430d7076b73aec23bb
124
https://github.com/ansible/awx.git
153
def test_custom_environment_injectors_with_reserved_env_var(self, private_data_dir, job): task = tasks.jobs.RunJob() task.instance = job some_cloud = CredentialType( kind='cloud', name='SomeCloud', managed=False, inputs={'fields': [{'id': 'api_token', 'label': 'API Token', 'type': 'string'}]}, injectors={'env': {'JOB_ID': 'reserved'}}, ) credential = Credential(pk=1, credential_type=some_cloud, inputs={'api_token': 'ABC123'}) job.credentials.add(credential) env = task.b
25
212
test_custom_environment_injectors_with_reserved_env_var
47
0
1
25
tests/sentry/api/endpoints/test_organization_auditlogs.py
94,083
feat(auditlog): Remove versioning from endpoint (#37301) * feat(auditlog): Remove versioning from endpoint * Update tests
sentry
13
Python
29
test_organization_auditlogs.py
def test_simple(self): now = timezone.now() org2 = self.create_organization(owner=self.user) entry1 = AuditLogEntry.objects.create( organization=self.organization, event=audit_log.get_event_id("ORG_EDIT"), actor=self.user, datetime=now, ) entry2 = AuditLogEntry.objects.create( organization=self.organization, event=audit_log.get_event_id("ORG_EDIT"), actor=self.user, datetime=now + timedelta(seconds=1), ) AuditLogEntry.objects.create( organization=org2, event=audit_log.get_event_id("ORG_EDIT"), actor=self.user, datetime=now, ) response = self.get_success_response(self.organization.slug) assert len(response.data["rows"]) == 2 assert response.data["rows"][0]["id"] == str(entry2.id) assert response.data["rows"][1]["id"] == str(entry1.id)
2e24063442134bf50a485b69698b154d0090a361
193
https://github.com/getsentry/sentry.git
262
def test_simple(self): now = timezone.now() org2 = self.create_organization(owner=self.user) entry1 = AuditLogEntry.objects.create( organization=self.organization, event=audit_log.get_event_id("ORG_EDIT"), actor=self.user, datetime=now, ) entry2 = AuditLogEnt
28
303
test_simple
30
0
1
5
zerver/webhooks/github/tests.py
85,106
webhooks: Pick a more reasonable length for short sha. 7 characters are not enough for large projects, so we change it to reasonably longer. As an example, The Linux kernel needs at least 11 characters of sha in its shortened form to identify a revision. We pick 11 so it should work for most of the projects. Signed-off-by: Zixuan James Li <p359101898@gmail.com>
zulip
10
Python
28
tests.py
def test_push_50_commits_filtered_by_branches(self) -> None: self.url = self.build_webhook_url(branches="master,changes") commit_info = "* Update README.md ([0d1a26e67d8](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n" expected_message = f"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 50 commits to branch changes.\n\n{commit_info * COMMITS_LIMIT}[and 30 more commit(s)]" self.check_webhook("push__50_commits", TOPIC_BRANCH, expected_message)
4e4689949438735622bdf669f05d218c671e7e01
36
https://github.com/zulip/zulip.git
57
def test_push_50_commits_filtered_by_branches(self) -> None: self.url = self.build_webhook_url(branches="master,changes") commit_info = "* Update README.md ([0d1a26e67d8](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n" expected_message = f"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 50 commits to branch changes.\n\n{commit_info * COMMITS_LIMIT
10
73
test_push_50_commits_filtered_by_branches
8
0
1
2
modules/image/Image_editing/super_resolution/swinir_l_real_sr_x4/swinir.py
51,945
add swinir_l_real_sr_x4 (#2076) * git add swinir_l_real_sr_x4 * fix typo * fix typo Co-authored-by: chenjian <chenjian26@baidu.com>
PaddleHub
8
Python
8
swinir.py
def extra_repr(self) -> str: return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
2e373966a7fd3119c205350fb14d0b7bfe74185d
10
https://github.com/PaddlePaddle/PaddleHub.git
14
def extra_repr(self) -> str: return
6
38
extra_repr
133
0
14
37
pipenv/patched/pip/_vendor/requests/adapters.py
22,048
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
pipenv
15
Python
63
adapters.py
def cert_verify(self, conn, url, verify, cert): if url.lower().startswith("https") and verify: cert_loc = None # Allow self-specified cert location. if verify is not True: cert_loc = verify if not cert_loc: cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) if not cert_loc or not os.path.exists(cert_loc): raise OSError( f"Could not find a suitable TLS CA certificate bundle, " f"invalid path: {cert_loc}" ) conn.cert_reqs = "CERT_REQUIRED" if not os.path.isdir(cert_loc): conn.ca_certs = cert_loc else: conn.ca_cert_dir = cert_loc else: conn.cert_reqs = "CERT_NONE" conn.ca_certs = None conn.ca_cert_dir = None if cert: if not isinstance(cert, basestring): conn.cert_file = cert[0] conn.key_file = cert[1] else: conn.cert_file = cert conn.key_file = None if conn.cert_file and not os.path.exists(conn.cert_file): raise OSError( f"Could not find the TLS certificate file, " f"invalid path: {conn.cert_file}" ) if conn.key_file and not os.path.exists(conn.key_file): raise OSError( f"Could not find the TLS key file, invalid path: {conn.key_file}" )
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
205
https://github.com/pypa/pipenv.git
631
def cert_verify(self, conn, url, verify, cert): if url.lower().startswith("https") and verify: cert_loc = None # Allow self-specified cert location. if verify is not True: cert_loc = verify if not cert_loc: cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) if not cert_loc or not os.path.exists(cert_loc): raise OSError( f"Could not find a suitable TLS CA certificate bundle, " f"invalid path: {cert_loc}" ) conn.cert_reqs = "CERT_REQUIRED" if not os.path.isdir(cert_loc): conn.ca_certs = cert_loc else: conn.ca_cert_dir = cert_loc else: conn.cert_reqs = "CERT_NONE" conn.ca_certs = None conn.ca_cert_dir = None if cert: if not isinstance(cert, basestring): conn.cert_file = cert[0] conn.key_file = cert[1] else: conn.cert_file = cert conn.key_file = None if conn.cert_file and not os.path.exists(conn.cert_file): raise OSError( f"Could not find the TLS certificate file, " f"invalid path: {conn.cert_file}" ) if conn.key_file and not os.path.exists(conn.key_file): raise OSError( f"Could not find the TLS key file, invalid path:
23
357
cert_verify
189
0
11
67
pandas/plotting/_matplotlib/core.py
171,266
STYLE: fix pylint reimported warnings (#49645) * STYLE: fix pylint reimported warnings * fixup! STYLE: fix pylint reimported warnings
pandas
17
Python
95
core.py
def _make_plot(self) -> None: colors = self._get_colors() ncolors = len(colors) pos_prior = neg_prior = np.zeros(len(self.data)) K = self.nseries for i, (label, y) in enumerate(self._iter_data(fillna=0)): ax = self._get_ax(i) kwds = self.kwds.copy() if self._is_series: kwds["color"] = colors elif isinstance(colors, dict): kwds["color"] = colors[label] else: kwds["color"] = colors[i % ncolors] errors = self._get_errorbars(label=label, index=i) kwds = dict(kwds, **errors) label = pprint_thing(label) label = self._mark_right_label(label, index=i) if (("yerr" in kwds) or ("xerr" in kwds)) and (kwds.get("ecolor") is None): kwds["ecolor"] = mpl.rcParams["xtick.color"] start = 0 if self.log and (y >= 1).all(): start = 1 start = start + self._start_base if self.subplots: w = self.bar_width / 2 rect = self._plot( ax, self.ax_pos + w, y, self.bar_width, start=start, label=label, log=self.log, **kwds, ) ax.set_title(label) elif self.stacked: mask = y > 0 start = np.where(mask, pos_prior, neg_prior) + self._start_base w = self.bar_width / 2 rect = self._plot( ax, self.ax_pos + w, y, self.bar_width, start=start, label=label, log=self.log, **kwds, ) pos_prior = pos_prior + np.where(mask, y, 0) neg_prior = neg_prior + np.where(mask, 0, y) else: w = self.bar_width / K rect = self._plot( ax, self.ax_pos + (i + 0.5) * w, y, w, start=start, label=label, log=self.log, **kwds, ) self._append_legend_handles_labels(rect, label)
289f32df5a565848adbc0adc8949fa4066542316
445
https://github.com/pandas-dev/pandas.git
1,151
def _make_plot(self) -> None: colors = self._get_colors() ncolors = len(colors) pos_prior = neg_prior = np.zeros(len(self.data)) K = self.nseries for i, (label, y) in enumerate(self._iter_data(fillna=0)): ax = self._get_ax(i) kwds = self.kwds.copy() if self._is_series: kwds["color"] = colors elif isinstance(colors, dict): kwds["color"] = colors[label] else: kwds["color"] = colors[i % ncolors] errors = self._get_errorbars(label=label, index=i) kwds = dict(kwds, **errors) label = pprint_thing(label) label = self._mark_right_label(label, index=i) if (("yerr" in kwds) or ("xerr" in kwds)) and (kwds.get("ecolor") is None): kwds["ecolor"] = mpl.rcParams["xtick.color"] start = 0 if self.log and (y >= 1).all(): start = 1 start = start + self._start_base if self.subplots: w = self.bar_width / 2 rect = self._plot( ax, self.ax_pos + w, y, self.bar_width, start=start, label=label, log=self.log, **kwds, ) ax.set_title(label) elif self.stacked: mask = y > 0 start = np.where(mask, pos_prior, neg_prior) + self._start_base w = self.bar_width / 2 rect = self._plot( ax, self.ax_pos + w, y, self.bar_width, start=start, label=label, log=self.log, **kwds, ) pos_prior = pos_prior + np.where(mask, y, 0) neg_prior = neg_prior + np.where(mask, 0, y) else: w = self.bar
49
672
_make_plot
157
0
1
70
zerver/tests/test_subs.py
83,055
stream colors: Try harder to avoid collisions. We now use recipient_id % 24 for new stream colors when users have already used all 24 of our canned colors. This fix doesn't address the scenario that somebody dislikes one of our current canned colors, so if a user continually changes canned color N to some other color for new streams, their new streams will continue to include color N (and the user will still need to change them). This fix doesn't address the fact that it can be expensive during bulk-add situations to query for all the colors that users have already used up. See https://chat.zulip.org/#narrow/stream/3-backend/topic/assigning.20stream.20colors for more discussion.
zulip
10
Python
106
test_subs.py
def test_pick_colors(self) -> None: used_colors: Set[str] = set() color_map: Dict[int, str] = {} recipient_ids = list(range(30)) user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, { 0: "#76ce90", 1: "#fae589", 2: "#a6c7e5", 3: "#e79ab5", 4: "#bfd56f", 5: "#f4ae55", 6: "#b0a5fd", 7: "#addfe5", 8: "#f5ce6e", 9: "#c2726a", 10: "#94c849", 11: "#bd86e5", 12: "#ee7e4a", 13: "#a6dcbf", 14: "#95a5fd", 15: "#53a063", 16: "#9987e1", 17: "#e4523d", 18: "#c2c2c2", 19: "#4f8de4", 20: "#c6a8ad", 21: "#e7cc4d", 22: "#c8bebf", 23: "#a47462", # start repeating 24: "#76ce90", 25: "#fae589", 26: "#a6c7e5", 27: "#e79ab5", 28: "#bfd56f", 29: "#f4ae55", }, ) color_map = {98: "color98", 99: "color99"} used_colors = set(STREAM_ASSIGNMENT_COLORS) - {"#c6a8ad", "#9987e1"} recipient_ids = [99, 98, 1, 2, 3, 4] user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, {98: "color98", 99: "color99", 1: "#9987e1", 2: "#c6a8ad", 3: "#e79ab5", 4: "#bfd56f"}, ) used_colors = set(STREAM_ASSIGNMENT_COLORS) color_map = {} recipient_ids = [2, 26, 50, 74] user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, {2: "#a6c7e5", 26: "#a6c7e5", 50: "#a6c7e5", 74: "#a6c7e5"}, )
dd1c9c45c778dc5280c2b02c3b9fb327d2507cc1
315
https://github.com/zulip/zulip.git
832
def test_pick_colors(self) -> None: used_colors: Set[str] = set() color_map: Dict[int, str] = {} recipient_ids = list(range(30)) user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, { 0: "#76ce90", 1: "#fae589", 2: "#a6c7e5", 3: "#e79ab5", 4: "#bfd56f", 5: "#f4ae55", 6: "#b0a5fd", 7: "#addfe5", 8: "#f5ce6e", 9: "#c2726a", 10: "#94c849", 11: "#bd86e5", 12: "#ee7e4a", 13: "#a6dcbf", 14: "#95a5fd", 15: "#53a063", 16: "#9987e1", 17: "#e4523d", 18: "#c2c2c2", 19: "#4f8de4", 20: "#c6a8ad", 21: "#e7cc4d", 22: "#c8bebf", 23: "#a47462", # start repeating 24: "#76ce90", 25: "#fae589", 26: "#a6c7e5", 27: "#e79ab5", 28: "#bfd56f", 29: "#f4ae55",
16
520
test_pick_colors
37
0
1
10
keras/engine/base_layer_test.py
270,852
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
14
Python
35
base_layer_test.py
def test_dynamic_layer_error_running_in_graph_mode(self): with tf.compat.v1.get_default_graph().as_default(): model = test_utils.get_model_from_layers( [DynamicLayer(dynamic=True)], input_shape=(3,) ) self.assertEqual(model.dynamic, True) # But then you cannot run the model since you're in a graph scope. with self.assertRaisesRegex( ValueError, "You must enable eager execution" ): model.compile(rmsprop.RMSprop(0.001), loss="mse")
84afc5193d38057e2e2badf9c889ea87d80d8fbf
79
https://github.com/keras-team/keras.git
154
def test_dynamic_layer_error_running_in_graph_mode(self): with tf.compat.v1.get_default_graph().as_default(): model = test_utils.get_model_fr
20
129
test_dynamic_layer_error_running_in_graph_mode
41
0
4
11
django/templatetags/tz.py
206,340
Refs #33476 -- Reformatted code with Black.
django
11
Python
33
tz.py
def timezone_constructor(tzname): if settings.USE_DEPRECATED_PYTZ: import pytz try: return pytz.timezone(tzname) except pytz.UnknownTimeZoneError: raise UnknownTimezoneException try: return zoneinfo.ZoneInfo(tzname) except zoneinfo.ZoneInfoNotFoundError: raise UnknownTimezoneException # HACK: datetime instances cannot be assigned new attributes. Define a subclass # in order to define new attributes in do_timezone().
9c19aff7c7561e3a82978a272ecdaad40dda5c00
44
https://github.com/django/django.git
104
def timezone_constructor(tzname): if settings.USE_DEPRECATED_PYTZ: import pytz try: return pytz.timezone(tzname) except pytz.UnknownTimeZoneError: raise UnknownTimezone
11
75
timezone_constructor
38
0
1
15
tests/sentry/api/serializers/test_organization.py
96,286
feat(apidocs): type organization serializer (#31787)
sentry
10
Python
28
test_organization.py
def test_trusted_relay_serializer(self): completion_seen = timezone.now() serializer = OnboardingTasksSerializer() task = OrganizationOnboardingTask.objects.create( organization=self.organization, task=OnboardingTask.FIRST_PROJECT, status=OnboardingTaskStatus.PENDING, user=self.user, completion_seen=completion_seen, ) result = serialize(task, self.user, serializer) assert result["task"] == "create_project" assert result["status"] == "pending" assert result["completionSeen"] == completion_seen assert result["data"] == {}
1bd1c98d520fb44c5e69f3159102cd8f07b84911
95
https://github.com/getsentry/sentry.git
155
def test_trusted_relay_serializer(self): completion_seen = timezone.now() serializer = OnboardingTasksSerializer() task = OrganizationOnboardingTask.objects.create( organization=self.organization, task
20
153
test_trusted_relay_serializer
14
0
1
8
tests/functional/eks/test_kubeconfig.py
189,163
Deprecate Kubernetes client API version v1alpha1 Kubernetes has deprecated v1alpha1, v1beta1 has been available since Kubernetes v1.11 (kubernetes/kubernetes#64482), and EKS currently supports Kubernetes versions v1.16 through v1.21. This is a breaking change for clients running versions v1.10 and older, which haven't been supported by EKS since September 2019. "aws eks get-token" now respects the KUBERNETES_EXEC_INFO environment variable and conservatively falls back to v1alpha1, which is supported by Kubernetes versions 1.10 through 1.22 (released upstream August 2021, to be released by EKS in Q4 2021). It also now supports "v1beta1" and "v1". "aws eks update-kubeconfig" now writes "v1beta1" in the kubeconfig which will be supported by Kubernetes until 1.29 (aproximately December 2023). At or around that date, we can change the default version written to kubeconfigs to "v1" Signed-off-by: Micah Hausler <mhausler@amazon.com>
aws-cli
11
Python
13
test_kubeconfig.py
def test_load_noexist(self): no_exist_path = os.path.join(self._temp_directory, "this_does_not_exist") loaded_config = self._loader.load_kubeconfig(no_exist_path) self.assertEqual(loaded_config.content, _get_new_kubeconfig_content()) self._validator.validate_config.called_with( Kubeconfig(no_exist_path, _get_new_kubeconfig_content()))
1a6b498657ec5dd29ddf4f6b240c6fc0c5d88f7a
58
https://github.com/aws/aws-cli.git
112
def test_load_noexist(self): no_exist_path = os.path.join(self._temp_directory, "this_does_not_exist") loaded_config = self._loader.load_kubeconfig(no_exist_
17
94
test_load_noexist
11
0
7
58
tests/sentry/eventstream/kafka/test_consumer.py
90,358
ci: compile devservices args (#34891)
sentry
11
Python
8
test_consumer.py
def test_consumer_start_from_committed_offset(requires_kafka): consumer_group = f"consumer-{uuid.uuid1().hex}" synchronize_commit_group = f"consumer-{uuid.uuid1().hex}" messages_delivered = defaultdict(list)
34bb5f4c3909e671b6502eceeb849f9693d9794e
386
https://github.com/getsentry/sentry.git
19
def test_consumer_start_from_committed_offset(requires_kafka): consumer_group = f"consumer-{uuid.uuid1().hex}" synchronize_commit_group = f"consumer-{uuid.uuid1().hex}" messages_delivered = defaultdict(list)
10
61
test_consumer_start_from_committed_offset
10
0
1
2
django/db/models/expressions.py
205,486
Refs #33476 -- Reformatted code with Black.
django
8
Python
10
expressions.py
def window_frame_start_end(self, connection, start, end): raise NotImplementedError("Subclasses must implement window_frame_start_end().")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
16
https://github.com/django/django.git
16
def window_frame_start_end(self, connection, start, end): raise
6
25
window_frame_start_end
175
0
7
48
tools/program.py
23,359
add use_xpu config for det_mv3_db.yml
PaddleOCR
15
Python
119
program.py
def preprocess(is_train=False): FLAGS = ArgsParser().parse_args() profiler_options = FLAGS.profiler_options config = load_config(FLAGS.config) config = merge_config(config, FLAGS.opt) profile_dic = {"profiler_options": FLAGS.profiler_options} config = merge_config(config, profile_dic) if is_train: # save_config save_model_dir = config['Global']['save_model_dir'] os.makedirs(save_model_dir, exist_ok=True) with open(os.path.join(save_model_dir, 'config.yml'), 'w') as f: yaml.dump( dict(config), f, default_flow_style=False, sort_keys=False) log_file = '{}/train.log'.format(save_model_dir) else: log_file = None logger = get_logger(name='root', log_file=log_file) # check if set use_gpu=True in paddlepaddle cpu version use_gpu = config['Global']['use_gpu'] check_gpu(use_gpu) # check if set use_xpu=True in paddlepaddle cpu/gpu version use_xpu = False if 'use_xpu' in config['Global']: use_xpu = config['Global']['use_xpu'] check_xpu(use_xpu) alg = config['Architecture']['algorithm'] assert alg in [ 'EAST', 'DB', 'SAST', 'Rosetta', 'CRNN', 'STARNet', 'RARE', 'SRN', 'CLS', 'PGNet', 'Distillation', 'NRTR', 'TableAttn', 'SAR', 'PSE', 'SEED', 'SDMGR', 'LayoutXLM', 'LayoutLM' ] device = 'cpu' if use_gpu: device = 'gpu:{}'.format(dist.ParallelEnv().dev_id) if use_xpu: device = 'xpu' device = paddle.set_device(device) config['Global']['distributed'] = dist.get_world_size() != 1 if config['Global']['use_visualdl'] and dist.get_rank() == 0: from visualdl import LogWriter save_model_dir = config['Global']['save_model_dir'] vdl_writer_path = '{}/vdl/'.format(save_model_dir) os.makedirs(vdl_writer_path, exist_ok=True) vdl_writer = LogWriter(logdir=vdl_writer_path) else: vdl_writer = None print_dict(config, logger) logger.info('train with paddle {} and device {}'.format(paddle.__version__, device)) return config, device, logger, vdl_writer
49ecf9c3bc9e6154360a84f402d8b669580b6dd3
368
https://github.com/PaddlePaddle/PaddleOCR.git
472
def preprocess(is_train=False): FLAGS = ArgsParser().parse_args() profiler_options = FLAGS.profiler_options config = load_config(FLAGS.config) config = merge_config(config, FLAGS.opt) profile_dic = {"profiler_options": FLAGS.profiler_options} config = merge_config(config, profile_dic) if is_train: # save_config save_model_dir = config['Global']['save_model_dir'] os.makedirs(save_model_dir, exist_ok=True) with open(os.path.join(save_model_dir, 'config.yml'), 'w') as f: yaml.dump( dict(config), f, default_flow_style=False, sort_keys=False) log_file = '{}/train.log'.format(save_model_dir) else: log_file = None logger = get_logger(name='root', log_file=log_file) # check if set use_gpu=True in paddlepaddle cpu version use_gpu = config['Global']['use_gpu'] check_gpu(use_gpu) # check if set use_xpu=True in paddlepaddle cpu/gpu version use_xpu = False if 'use_xpu' in config['Global']: use_xpu = config['Global']['use_xpu'] check_xpu(use_xpu) alg = config['Architecture']['algorithm'] assert alg in [ 'EAST', 'DB', 'SAST', 'Rosetta', 'CRNN', 'STARNet', 'RARE', 'SRN', 'CLS', 'PGNet', 'Distillation', 'NRTR', 'TableAttn', 'SAR', 'PSE', 'SEED', 'SDMGR', 'LayoutXLM', 'LayoutLM' ] device = 'cpu' if use_gpu: device = 'gpu:{}'.format(dist.ParallelEnv().dev_id) if use_xpu: device = 'xpu' device = paddle.set_device(device) config['Global']['distributed'] = dist.get_world_size() != 1 if
50
646
preprocess
8
0
1
5
homeassistant/components/iglo/light.py
318,241
Improve type hints in light [a-i] (#75936) * Improve type hints in ads light * Improve type hints in avea light * Improve type hints in avion light * Improve type hints in broadlink light * More type hints * One more
core
11
Python
8
light.py
def max_mireds(self) -> int: return math.ceil( color_util.color_temperature_kelvin_to_mired(self._lamp.min_kelvin) )
20fec104e2a11b1a5164d7fe779eb0d894e098cf
24
https://github.com/home-assistant/core.git
40
def max_mireds(self) -> int: return math.ceil( color_util.color_temperature_kelvin_to_mired(self._la
9
41
max_mireds
67
0
3
11
nuitka/build/inline_copy/clcache/clcache/caching.py
178,643
Scons: The clcache was hashing the same files over and over * This might now be dead code that we no longer use direct mode
Nuitka
12
Python
53
caching.py
def getFileHash(filePath, additionalData=None): key = (filePath, additionalData) if key in _hash_cache: return _hash_cache[key] hasher = HashAlgorithm() with open(filePath, "rb") as inFile: hasher.update(inFile.read()) if additionalData is not None: # Encoding of this additional data does not really matter # as long as we keep it fixed, otherwise hashes change. # The string should fit into ASCII, so UTF8 should not change anything hasher.update(additionalData.encode("UTF-8")) _hash_cache[key] = hasher.hexdigest() return _hash_cache[key]
24bc8e9a4949b34772d2d2eb8342f3fd74ec3d5c
83
https://github.com/Nuitka/Nuitka.git
129
def getFileHash(filePath, additionalData=None): key = (filePath, additionalData) if key in _hash_cache: return _hash_cache[key] hasher = HashAlgorithm() with open(filePath, "rb") as inFile: hasher.update(inFile.read()) if additionalData is not None: # Encod
13
140
getFileHash
75
0
3
18
python/ray/tune/tests/test_logger.py
132,529
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
13
Python
47
test_logger.py
def _validate_json_result(self, config): # Check result logs results = [] result_file = os.path.join(self.test_dir, EXPR_RESULT_FILE) with open(result_file, "rt") as fp: for row in fp.readlines(): results.append(json.loads(row)) self.assertEqual(len(results), 3) self.assertSequenceEqual( [int(row["episode_reward_mean"]) for row in results], [4, 5, 6] ) # Check json saved config file config_file = os.path.join(self.test_dir, EXPR_PARAM_FILE) with open(config_file, "rt") as fp: loaded_config = json.load(fp) self.assertEqual(loaded_config, config) # Check pickled config file config_file = os.path.join(self.test_dir, EXPR_PARAM_PICKLE_FILE) with open(config_file, "rb") as fp: loaded_config = cloudpickle.load(fp) self.assertEqual(loaded_config, config)
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
172
https://github.com/ray-project/ray.git
238
def _validate_json_result(self, config): # Check result logs results = [] result_file = os.path.join(self.test_dir, EXPR_RESULT_FILE) with open(result_file, "rt") as fp: for row in fp.readlines(): results.append(json.loads(row)) self.assertEqual(len(results), 3) self.assertSequenceEqual( [int(row["episode_reward_mean"]) for row in results], [4, 5, 6] ) # Check json saved config file config_file = os.path.join(self.test_dir, EXPR_PARAM_FILE) with open(config_file, "rt") as fp: loaded_con
27
281
_validate_json_result
55
0
1
25
keras/callbacks_test.py
269,989
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
12
Python
47
callbacks_test.py
def test_TensorBoard_autoTrace_profileBatchRange(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch="1,3", write_graph=False, ) model.fit( x, y, batch_size=4, epochs=2, validation_data=(x, y), callbacks=[tb_cbk], ) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.tensors, { # Trace will be logged once at the batch it stops profiling. _ObservedSummary(logdir=self.train_dir, tag="batch_3"), }, ) self.assertEqual(1, self._count_trace_file(logdir=self.logdir))
84afc5193d38057e2e2badf9c889ea87d80d8fbf
147
https://github.com/keras-team/keras.git
297
def test_TensorBoard_autoTrace_profileBatchRange(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch="1,3", write_graph=False, ) model.fit( x, y, batch_size=4, epochs=2, validation_data=(x, y), callbacks=[tb_cbk], ) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.tensors, { # Trace will be logged once at the batch it stops profiling. _ObservedSummary(logdir=self.train_dir, tag="batch_3"), }, ) self.assertEqual(1, self._count_trace_file(logdir=sel
28
218
test_TensorBoard_autoTrace_profileBatchRange
49
0
1
18
tests/components/forked_daapd/test_browse_media.py
288,007
Cleanup add browse media forked daapd #79009 (#79157)
core
14
Python
40
test_browse_media.py
async def test_async_browse_image_missing(hass, hass_client, config_entry, caplog): with patch( "homeassistant.components.forked_daapd.media_player.ForkedDaapdAPI", autospec=True, ) as mock_api: config_entry.add_to_hass(hass) await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() client = await hass_client() mock_api.return_value.full_url = lambda x: "http://owntone_instance/" + x mock_api.return_value.get_track.return_value = {} media_content_id = create_media_content_id( "title", media_type=MediaType.TRACK, id_or_path="456" ) resp = await client.get( f"/api/media_player_proxy/{TEST_MASTER_ENTITY_NAME}/browse_media/{MediaType.TRACK}/{media_content_id}" ) assert resp.status == HTTPStatus.INTERNAL_SERVER_ERROR
b043a6ba887e8f925cfa97f3edf66b6f6d7fe4af
110
https://github.com/home-assistant/core.git
171
async def test_async_browse_image_missing(hass, hass_client, config_entry, caplog): with patch( "homeassistant.components.forked_daapd.media_player.ForkedDaapdAPI", autospec=True, ) as mock_api: config_entry.add_to_hass(hass) await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_don
30
202
test_async_browse_image_missing
129
0
6
29
pandas/tests/groupby/transform/test_transform.py
166,461
DEPR: groupby numeric_only default (#47025)
pandas
17
Python
87
test_transform.py
def test_transform_axis_1_reducer(request, reduction_func): # GH#45715 if reduction_func in ( "corrwith", "idxmax", "idxmin", "ngroup", "nth", ): marker = pytest.mark.xfail(reason="transform incorrectly fails - GH#45986") request.node.add_marker(marker) if reduction_func == "mad": warn = FutureWarning msg = "The 'mad' method is deprecated" elif reduction_func in ("sem", "std"): warn = FutureWarning msg = "The default value of numeric_only" else: warn = None msg = "" df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"]) with tm.assert_produces_warning(warn, match=msg): result = df.groupby([0, 0, 1], axis=1).transform(reduction_func) if reduction_func == "size": # size doesn't behave in the same manner; hardcode expected result expected = DataFrame(2 * [[2, 2, 1]], index=df.index, columns=df.columns) else: warn = FutureWarning if reduction_func == "mad" else None with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"): expected = df.T.groupby([0, 0, 1]).transform(reduction_func).T tm.assert_equal(result, expected)
7c054d6a256fd0186befe03acf9e9e86d81668d6
237
https://github.com/pandas-dev/pandas.git
298
def test_transform_axis_1_reducer(request, reduction_func): # GH#45715 if reduction_func in ( "corrwith", "idxmax", "idxmin", "ngroup", "nth", ): marker = pytest.mark.xfail(reason="transform incorrectly fails - GH#45986") request.node.add_marker(marker) if reduction_func == "mad": warn = Future
27
394
test_transform_axis_1_reducer
82
0
1
11
numpy/lib/tests/test_arraysetops.py
160,658
MAINT: bool instead of np.bool_ dtype
numpy
10
Python
51
test_arraysetops.py
def test_in1d_hit_alternate_algorithm(self): # Need extreme range to hit standard code # This hits it without the use of method='dictionary' a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64) b = np.array([2, 3, 4, 1e9], dtype=np.int64) expected = np.array([0, 1, 0, 1, 1, 1, 1], dtype=bool) assert_array_equal(expected, in1d(a, b)) assert_array_equal(np.invert(expected), in1d(a, b, invert=True)) a = np.array([5, 7, 1, 2], dtype=np.int64) b = np.array([2, 4, 3, 1, 5, 1e9], dtype=np.int64) ec = np.array([True, False, True, True]) c = in1d(a, b, assume_unique=True) assert_array_equal(c, ec)
d7e2582cd33b22a767286e8a3d95b336dfe51a34
195
https://github.com/numpy/numpy.git
173
def test_in1d_hit_alternate_algorithm(self): # Need extreme range to hit standard code # This hits it without the use of method='dictionary' a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64) b = np.array([2, 3, 4, 1e9], dtype=np.int64) expected = np.arra
16
271
test_in1d_hit_alternate_algorithm
10
0
1
24
python/ray/tests/test_component_failures.py
131,453
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
6
Python
10
test_component_failures.py
def test_dying_driver_wait(ray_start_regular): # Start the Ray processes. address_info = ray_start_regular
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
145
https://github.com/ray-project/ray.git
15
def test_dying_driver_wait(ray_start_regular): # Start
3
15
test_dying_driver_wait
34
0
2
16
saleor/tax/migrations/0002_add_default_tax_configs.py
29,529
Simple (flat rate) taxes API (#9784) * Add empty tax module * Add tax models (#9839) * Add tax API queries (#9856) * Add MANAGE_TAXES permission * Add tax configuration queries * Create tax configuration when channel is created * Drop sorters for now * Add TaxConfigurationPerCountry type * Update migration * Add metadata to TaxConfiguration type * Add tests for tax configuration queries * Add TaxClass types * Improve tests * Add queries for tax configuration per country * Fix query in tests * Update query cost map * Add tax API mutations (#9934) * Add taxConfigurationUpdate mutation * Update schema * Add tax class CRUD mutations * Add mutations to update/delete tax class rates per country * Review fixes * Add taxClass field to ProductType type (#9999) * Add taxClass field to ProductType type * Add taxClass field to Product type * Add taxClass field to shipping method type * Add displayGrossPrices to ProductPricingInfo (#10008) * Add displayGrossPrices to ProductPricingInfo * Add displayGrossPrices to Checkout * Add displayGrossPrices to Order * Add tests * Add ADDED_IN_35 label to new fields' descriptions * Use new display_gross_prices flag (#10121) * Use new display_gross_prices flag * Update tests * Add tests * Review fixes * Drop Vatlayer (#10335) * Add migration from Vatlayer to simple taxes * Review fixes * Review fixes * Drop usages of global include_taxes_in_prices flag (#10406) * Drop `include_taxes_in_prices` function from site settings * Adjust tests * Review fixes * Drop the `charge_taxes_on_shipping` flag from site settings. (#10466) * Include migrating Avatax tax codes in tax class migration * Drop `charge_taxes_on_shipping` function * Add tax_class to ShippingMethodData * Review fixes * Always calculate shipping tax with Avalara * Add default country rate (#10497) * Allow setting default tax rate for a country (without providing a tax class) * Add validation to allow settings only one default rate at once * Code review fixes * Add taxCalculationStrategy field * Add tests * CR fixes * Adjust resolver to use new tax configuration (#10533) * CR fixes * Add database router to fix false positives on relation mismatch. (#10524) * Add database router to fix false positives on relation mismatch. * The db router should have only 'allow_relation' implemented. * The 'db_for_write' part should stay. * Subscription for sync tax webooks (#10433) * Add proposed changes to schema * Add base implementation for sync tax subscription * Add test for empty order * Add clean up and missing part for tests * Use subscription for tax webhooks. Add more tests * Improve descriptions for tax objects * Adjust resolver to use new tax configuration (#10533) * Add taxCalculationStrategy field (#10532) * Add taxCalculationStrategy field * Add tests * CR fixes * CR fixes * Add datamigration to populate taxCalculationStrategy * Migrate Product.charge_taxes to new tax configuration (#10585) * Migrate Product.charge_taxes field to new tax configuration * Rename function * Fix tests * Change assign_tax_code_to_object_meta function to support tax classes * Update tax class fixtures * Improve dataloader * CR fixes * CR fixes * Add deprecation notice to dataloader * Allow removing country rates in the `taxCountryConfigurationUpdate` mutation. 
(#10647) * Allow deleting rates in taxCountryConfigurationUpdate mutation * Change tax rates ordering to keep default rates first (with null tax classes) * Update existing migration * Remove TaxClass.is_default field (#10660) * Change tax rates ordering to keep default rates first (with null tax classes) * Update existing migration * Drop is_default field from TaxClass model * Drop extra Avalara config (#10673) * Drop extra Avatax config options * Adjust tests * Use flat rates in tax calculations (#10747) * WIP Use new tax configuration in tax calculations * Use new tax calculations for checkout * Adjust tests * Add flat rates calculations for checkout and order * Calculate flat rates in product pricing objects * Adjust tests * Add tests for order calculations * Add tests for product queries tax calculations * Add tests for order calculations * Use base calculations to get default checkout shipping price * Add tests for using tax_class from product_type * Add tests for get_order_country * Adjust tests * Code review fixes * Drop update_taxes_for_order_lines (#11000) * Fix calls to Avalara not validating order (#11012) * Add validation to disallow creating negative rates (#11010) * Add missing recalculation of order.undiscounted_total (#11039) * Optimize getting tax class country rates (#11040) * Tax API adjustments for dashboard (#11042) * Ignore null rates in taxCountryConfigurationUpdate mutation * Allow to pass null rates in taxClassUpdate mutation * Improve tests * Update saleor/graphql/tax/mutations/tax_class_update.py Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> * Update schema Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> * Cleanup before release (#11049) * Update ADDED_IN labels * Fix skippeded test * Regenerate migrations * Deprecate CountryDisplay.vat field * Add changelog * Update order.undiscounted_total calculation to not include taxes (#11068) * Fix assigning rates to tax classes (#11105) * Allow all staff users and apps to query tax-related data (#11113) * Bump dependencies for origin/SALEOR-6391-simple-taxes (#11127) Bumps: - cryptography to 38.0.3 - pillow to 9.3.0 * Fix using tax code from product and product type's tax class (#11111) * Fix using tax code from product and product type's tax class * Extract function * Replace synchronous load_site with promise (#11165) * Denormalize tax class for order lines and orders (#11172) * WIP Denormalize tax class for order lines and orders * Add denormalized fields in GraphQL types * Add tests for denormalized API fields * Return 0 rate in API when rate is null * Add preview/version notes in new field descriptions * Update changelog Co-authored-by: Dominik Kozaczko <dominik@kozaczko.info> Co-authored-by: Maciej Korycinski <maciej@mirumee.com> Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> Co-authored-by: Mika <6186720+NyanKiyoshi@users.noreply.github.com> Co-authored-by: Krzysztof Kwaśniak <mr.brzys@gmail.com>
saleor
13
Python
29
0002_add_default_tax_configs.py
def add_tax_configuration_for_channels(apps, schema_editor): Channel = apps.get_model("channel", "Channel") TaxConfiguration = apps.get_model("tax", "TaxConfiguration") SiteSettings = apps.get_model("site", "SiteSettings") site_settings = SiteSettings.objects.first() tax_configurations = [] for channel in Channel.objects.all(): tax_configurations.append( TaxConfiguration( channel=channel, charge_taxes=True, display_gross_prices=site_settings.display_gross_prices, prices_entered_with_tax=site_settings.include_taxes_in_prices, ) ) TaxConfiguration.objects.bulk_create(tax_configurations)
67df28935c555fdd673f17e8c9183e24dde7c51f
97
https://github.com/saleor/saleor.git
150
def add_tax_configuration_for_channels(apps, schema_editor): Channel = apps.get_model("channel", "Channel") TaxConfiguration = apps.get_model("tax", "TaxConfiguration") SiteSettings = apps.get_model("site", "SiteSettings") site_settings = SiteSettings.objects.first() tax_configurations = [] fo
19
160
add_tax_configuration_for_channels
11
0
3
5
django/core/management/base.py
204,592
Refs #33476 -- Reformatted code with Black.
django
10
Python
10
base.py
def handle_default_options(options): if options.settings: os.environ["DJANGO_SETTINGS_MODULE"] = options.settings if options.pythonpath: sys.path.insert(0, options.pythonpath)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
38
https://github.com/django/django.git
34
def handle_default_options(options): if options.set
9
64
handle_default_options
14
0
3
7
python/ray/data/impl/plan.py
138,505
[Datasets] [Out-of-Band Serialization: 2/3] Refactor `ExecutionPlan` to maintain complete lineage and eagerly unlink block references. (#23931) This PR refactors ExecutionPlan to maintain complete stage lineage, even for eagerly computed datasets, while ensuring that block references are unlinked as early as possible in order to more eagerly release block memory. This PR is the final precursor to adding the actual out-of-band serialization APIs (PR 3/3). The fully lineage has to be maintained, even for eagerly computed datasets, since the lineage is needed for out-of-band serialization of datasets.
ray
10
Python
12
plan.py
def is_read_stage(self) -> bool: return ( self.has_lazy_input() and not self._stages_before_snapshot and not self._stages_after_snapshot )
9ee24530abf1b5e3239869b5257dd7b678337b90
26
https://github.com/ray-project/ray.git
68
def is_read_stage(self) -> bool: return ( sel
6
45
is_read_stage
30
0
1
9
keras/layers/core/einsum_dense_test.py
272,547
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
11
Python
28
einsum_dense_test.py
def test_unspecified_output_dim_fails(self): input_tensor = keras.Input(shape=(32,)) layer = einsum_dense.EinsumDense(equation="ab,bc->cd", output_shape=64) with self.assertRaisesRegex( ValueError, ".*Dimension 'd' was specified in the output 'cd' but has " "no corresponding dim.*", ): _ = layer(input_tensor)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
50
https://github.com/keras-team/keras.git
101
def test_unspecified_output_dim_fails(self): input_tensor = keras.Input(shape=(32,)) layer = einsum_dense.EinsumDense(equation="ab,bc->cd", output_shape=64) with self.assertRaisesRegex( ValueError, ".*Dimension 'd' was specified in the outp
14
86
test_unspecified_output_dim_fails
112
0
15
48
erpnext/stock/utils.py
67,992
style: format code with black
erpnext
21
Python
76
utils.py
def get_incoming_rate(args, raise_error_if_no_rate=True): from erpnext.stock.stock_ledger import ( get_batch_incoming_rate, get_previous_sle, get_valuation_rate, ) if isinstance(args, str): args = json.loads(args) voucher_no = args.get("voucher_no") or args.get("name") in_rate = None if (args.get("serial_no") or "").strip(): in_rate = get_avg_purchase_rate(args.get("serial_no")) elif args.get("batch_no") and frappe.db.get_value( "Batch", args.get("batch_no"), "use_batchwise_valuation", cache=True ): in_rate = get_batch_incoming_rate( item_code=args.get("item_code"), warehouse=args.get("warehouse"), batch_no=args.get("batch_no"), posting_date=args.get("posting_date"), posting_time=args.get("posting_time"), ) else: valuation_method = get_valuation_method(args.get("item_code")) previous_sle = get_previous_sle(args) if valuation_method in ("FIFO", "LIFO"): if previous_sle: previous_stock_queue = json.loads(previous_sle.get("stock_queue", "[]") or "[]") in_rate = ( _get_fifo_lifo_rate(previous_stock_queue, args.get("qty") or 0, valuation_method) if previous_stock_queue else 0 ) elif valuation_method == "Moving Average": in_rate = previous_sle.get("valuation_rate") or 0 if in_rate is None: in_rate = get_valuation_rate( args.get("item_code"), args.get("warehouse"), args.get("voucher_type"), voucher_no, args.get("allow_zero_valuation"), currency=erpnext.get_company_currency(args.get("company")), company=args.get("company"), raise_error_if_no_rate=raise_error_if_no_rate, batch_no=args.get("batch_no"), ) return flt(in_rate)
494bd9ef78313436f0424b918f200dab8fc7c20b
333
https://github.com/frappe/erpnext.git
64
def get_incoming_rate(args, raise_error_if_no_rate=True): from erpnext.stock.stock_ledger import ( get_batch_incoming_rate, get_previous_sle, get_valuation_rate, ) if isinstance(args, str): args = json.loads(args) voucher_no = args.get("voucher_no") or args.get("name") in_rate = None if (args.get("serial_no") or "").strip(): in_rate = get_avg_purchase_rate(args.get("serial_no")) elif args.get("batch_no") and frappe.db.get_value( "Batch", args.get("batch_no"), "use_batchwise_valuation", cache=True ): in_rate = get_batch_incoming_rate( item_code=args.get("item_code"), warehouse=args.get("warehouse"), batch_no=args.get("batch_no"), posting_date=args.get("posting_date"), posting_time=args.get("posting_time"), ) else: valuation_method = get_valuation_method(args.get("item_code")) previous_sle = get_previous_sle(args) if valuation_method in ("FIFO", "LIFO"): if previous_sle: previous_stock_queue = json.loads(previous_sle.get("stock_queue", "[]") or "[]") in_rate = ( _get_fifo_lifo_rate(previous_stock_queue, args.get("qty") or 0, valuation_method)
36
566
get_incoming_rate
34
0
2
11
keras/callbacks.py
269,928
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
11
Python
33
callbacks.py
def _push_writer(self, writer, step): if self.update_freq == "epoch": return should_record = lambda: tf.equal(step % self.update_freq, 0) # TODO(b/151339474): Fix deadlock when not using .value() here. summary_context = ( writer.as_default(step.value()), tf.summary.record_if(should_record), ) self._prev_summary_state.append(summary_context) summary_context[0].__enter__() summary_context[1].__enter__()
84afc5193d38057e2e2badf9c889ea87d80d8fbf
82
https://github.com/keras-team/keras.git
130
def _push_writer(self, writer, step): if self.update_freq == "epoch": return should_record = lambda: tf.equal(step % self.update_freq, 0) # TO
16
133
_push_writer
23
1
1
2
doc/source/ray-core/doc_code/scheduling.py
137,072
[Doc] Revamp ray core scheduling doc (#30675)
ray
6
Python
21
scheduling.py
def small_object_func(): # Small object is returned inline directly to the caller, # instead of storing in the distributed memory. return [1] @ray.remote
1216d5d2d39556895b43e4a6dd8dd7825c3acd30
@ray.remote
8
https://github.com/ray-project/ray.git
30
def small_object_func(): # Small object is returned inline directly to
3
23
small_object_func
25
0
1
8
tests/acceptance/test_performance_issues.py
87,487
test(perf-issues): Add more E2E tests (#40723) - one test verifies that similar events are grouped - one test verifies that dissimilar events are not grouped
sentry
13
Python
23
test_performance_issues.py
def test_with_one_performance_issue(self, mock_now): mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(minutes=5) event_data = self.create_sample_event(mock_now.return_value.timestamp()) with self.feature(FEATURES): event = self.store_event(data=event_data, project_id=self.project.id) self.page.visit_issue(self.org.slug, event.groups[0].id) self.browser.click('[aria-label="Show Details"]') self.browser.snapshot("performance issue details", desktop_only=True)
8d05e775c1f46209ec515d5267dc9c74ab51a106
113
https://github.com/getsentry/sentry.git
89
def test_with_one_performance_issue(self, mock_now): mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(minutes=5) event_data = self.create_sample_event(mock_now.return_value.timestamp()) with self.feature(FEATURES): event = self.store_event(data=event_data, project_id=self.project.id) self.page.visit_issue(self.org.s
32
182
test_with_one_performance_issue
197
0
1
15
test/test_modeling_question_answering.py
256,459
Apply black formatting (#2115) * Testing black on ui/ * Applying black on docstores * Add latest docstring and tutorial changes * Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too * Remove comments * Relax constraints on pydoc-markdown * Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade * Fix a couple of bugs * Add a type: ignore that was missing somehow * Give path to black * Apply Black * Apply Black * Relocate a couple of type: ignore * Update documentation * Make Linux CI run after applying Black * Triggering Black * Apply Black * Remove dependency, does not work well * Remove manually double trailing commas * Update documentation Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
haystack
11
Python
89
test_modeling_question_answering.py
def test_inference_different_inputs(bert_base_squad2): qa_format_1 = [ { "questions": ["Who counted the game among the best ever made?"], "text": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.", } ] q = Question(text="Who counted the game among the best ever made?") qa_format_2 = QAInput( questions=[q], doc_text="Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.", ) result1 = bert_base_squad2.inference_from_dicts(dicts=qa_format_1) result2 = bert_base_squad2.inference_from_objects(objects=[qa_format_2]) assert result1 == result2
a59bca366174d9c692fa19750c24d65f47660ef7
70
https://github.com/deepset-ai/haystack.git
270
def test_inference_different_inputs(bert_base_squad2): qa_format_1 = [ { "questions": ["Who counted the game among the best ever made?"], "text": "Twilight Pri
16
120
test_inference_different_inputs
10
0
1
3
netbox/wireless/tests/test_filtersets.py
266,028
Closes #10710: Add status field to WirelessLAN
netbox
13
Python
10
test_filtersets.py
def test_status(self): params = {'status': [WirelessLANStatusChoices.STATUS_ACTIVE, WirelessLANStatusChoices.STATUS_DISABLED]} self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
ad40d42dc467940b27021104a8beaee3cce1afaa
43
https://github.com/netbox-community/netbox.git
23
def test_status(self): params = {'status': [WirelessLANStatusChoices.STATUS_ACTIVE, WirelessLANStatusChoices.STATUS_DISABLED]} self.assertEqual(self.filterset(params, self.queryset).q
11
68
test_status
14
0
2
4
rest_framework/request.py
48,634
Replaced parse_header with parse_header_parameters. (#8556) Add a backwards compatibility shim for Django versions that have no (or an incompatible) django.utils.http.parse_header_parameters implementation. Thanks to Shai Berger for review. Co-authored-by: Jaap Roes <jroes@leukeleu.nl>
django-rest-framework
9
Python
13
request.py
def is_form_media_type(media_type): base_media_type, params = parse_header_parameters(media_type) return (base_media_type == 'application/x-www-form-urlencoded' or base_media_type == 'multipart/form-data')
ad282da97cf7b23c50a8fa7b7c5cad68c1deedc3
24
https://github.com/encode/django-rest-framework.git
34
def is_form_media_type(media_type): base_media_type, params = parse_header_parameters(media_type) return (base_media_type == 'application/x-www-form-urlencoded' or base_media_type == 'multipart/form-data')
5
45
is_form_media_type
61
0
4
29
src/diffusers/models/resnet.py
337,010
renamed x to meaningful variable in resnet.py (#677) * renamed single letter variables * renamed x to meaningful variable in resnet.py Hello @patil-suraj can you verify it Thanks * Reformatted using black * renamed x to meaningful variable in resnet.py Hello @patil-suraj can you verify it Thanks * reformatted the files * modified unboundlocalerror in line 374 * removed referenced before error * renamed single variable x -> hidden_state, p-> pad_value Co-authored-by: Nikhil A V <nikhilav@Nikhils-MacBook-Pro.local> Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> Co-authored-by: Suraj Patil <surajp815@gmail.com>
diffusers
12
Python
45
resnet.py
def downsample_2d(hidden_states, kernel=None, factor=2, gain=1):
    r
    assert isinstance(factor, int) and factor >= 1
    if kernel is None:
        kernel = [1] * factor

    kernel = torch.tensor(kernel, dtype=torch.float32)
    if kernel.ndim == 1:
        kernel = torch.outer(kernel, kernel)
    kernel /= torch.sum(kernel)

    kernel = kernel * gain
    pad_value = kernel.shape[0] - factor
    return upfirdn2d_native(
        hidden_states, kernel.to(device=hidden_states.device), down=factor, pad=((pad_value + 1) // 2, pad_value // 2)
    )
7265dd8cc82e41624b4a979a22f0d15dba55e956
127
https://github.com/huggingface/diffusers.git
111
def downsample_2d(hidden_states, kernel=None, factor=2, gain=1): r assert isinstance(factor, int) and factor >= 1 if kernel is None:
21
197
downsample_2d
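A minimal sketch of only the kernel setup that downsample_2d performs before handing off to upfirdn2d_native (the FIR/downsample call itself is omitted); the 2x factor and unit gain are illustrative values, not required ones.

import torch

factor, gain = 2, 1
kernel = torch.tensor([1] * factor, dtype=torch.float32)  # default box kernel
if kernel.ndim == 1:
    kernel = torch.outer(kernel, kernel)                   # expand 1D kernel to a separable 2D kernel
kernel = kernel / kernel.sum() * gain                      # normalize, then scale by gain
pad_value = kernel.shape[0] - factor
print(kernel, ((pad_value + 1) // 2, pad_value // 2))      # padding split passed to the FIR step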
17
0
3
6
modules/image/text_to_image/disco_diffusion_cnclip_vitb16/cn_clip/clip/bert_tokenizer.py
49,724
add disco_diffusion_cnclip_vitb16 module
PaddleHub
11
Python
14
bert_tokenizer.py
def tokenize(self, text):
    split_tokens = []
    for token in self.basic_tokenizer.tokenize(text):
        for sub_token in self.wordpiece_tokenizer.tokenize(token):
            split_tokens.append(sub_token)

    return split_tokens
f4d6e64cdc132ae868699a0ba442f4ab1d304a14
43
https://github.com/PaddlePaddle/PaddleHub.git
63
def tokenize(self, text): split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) return split_tokens
9
67
tokenize
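A toy two-stage tokenizer showing the same nesting (a basic whitespace pass, then a sub-word pass); the classes below are invented for illustration and are not the PaddleHub tokenizers.

class ToyBasicTokenizer:
    def tokenize(self, text):
        return text.lower().split()

class ToyWordpieceTokenizer:
    def tokenize(self, token):
        # pretend every 4 characters forms one sub-token
        return [token[i:i + 4] for i in range(0, len(token), 4)]

basic, wordpiece = ToyBasicTokenizer(), ToyWordpieceTokenizer()
split_tokens = []
for token in basic.tokenize("Tokenization example"):
    for sub_token in wordpiece.tokenize(token):
        split_tokens.append(sub_token)
print(split_tokens)  # ['toke', 'niza', 'tion', 'exam', 'ple']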
40
0
1
8
onnx/test/shape_inference_test.py
255,603
Use Python type annotations rather than comments (#3962) * These have been supported since Python 3.5. ONNX doesn't support Python < 3.6, so we can use the annotations. Diffs generated by https://pypi.org/project/com2ann/. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Remove MYPY conditional logic in gen_proto.py It breaks the type annotations and shouldn't be needed. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Get rid of MYPY bool from more scripts Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * move Descriptors class above where its referenced in type annotation Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fixes Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * remove extra blank line Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotations Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotation in gen_docs Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix Operators.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix TestCoverage.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix protoc-gen-mypy.py Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
onnx
13
Python
34
shape_inference_test.py
def test_negative_log_likehood_shape_is_NCd1d2_reduction_sum(self) -> None:
    N, C, d1, d2 = 3, 4, 5, 6
    graph = self._make_graph(
        [("input", TensorProto.FLOAT, (N, C, d1, d2)),
         ("target", TensorProto.INT64, (N, d1, d2))],
        [make_node('NegativeLogLikelihoodLoss', ['input', 'target'], ['loss'], reduction='sum')],
        [])
    self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, ())])  # type: ignore
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
108
https://github.com/onnx/onnx.git
106
def test_negative_log_likehood_shape_is_NCd1d2_reduction_sum(self) -> None: N, C, d1, d2 = 3, 4, 5, 6 graph = self._make_graph( [("input", TensorProto.FLOAT, (N, C
15
162
test_negative_log_likehood_shape_is_NCd1d2_reduction_sum
16
0
2
4
tests/models/test_default_evaluator.py
19,069
Evaluation Default evaluator (#5092) * init Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * rename module Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * revert black change Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * change module path Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * lazy load pyspark Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * revert export Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix curcit import Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix conftest.py Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * Revert "fix conftest.py" This reverts commit 2ea29c62bfffc5461bf77f3da15b5c00f51de19b. 
* fix tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * default evaluator Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update import Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update hash algo Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update import Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comment Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix lint Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add more tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix lint Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update shap explainer Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * remove scikitplot dep Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add pr curve Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add shap.summary_plot Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * log explainer Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * improve explainer code Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update shap init Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update explainer creating Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update predict_proba Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add multi-class metrics artifacts Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add log_loss metric Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * lazy load pyspark Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address ben comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix 
Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * prevent show shap logo, add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * support spark model Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add shap version check Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update docs, loose classifier label limit Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * multiclass classifier merge metrics/plots Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * zfill feature name Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add config max_num_classes_threshold_logging_roc_pr_curve_for_multiclass_classifier Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * improve label handling Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * black Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * increase plot dpi Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix test fixture Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix pylint Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * use matplot rc_context Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix shap import Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor EvaluationDataset Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * limit user specify shap algos Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * clean Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update evaluation dataset Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * use svg fig Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * revert svg Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * curve dashline, legend display ap/roc, legend move out Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * linewidth 1 Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * keyword arguments for evaluate, fix tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * mark abc.abstractmethod, kw args for ModelEvaluator methods Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix pylint Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix pylint Signed-off-by: Weichen Xu <weichen.xu@databricks.com>
mlflow
10
Python
13
test_default_evaluator.py
def assert_dict_equal(d1, d2, rtol):
    for k in d1:
        assert k in d2
        assert np.isclose(d1[k], d2[k], rtol=rtol)
964f5ab75098c55f028f8acfeeae05df35ea68d5
37
https://github.com/mlflow/mlflow.git
32
def assert_dict_equal(d1, d2, rtol): for k in d1:
7
52
assert_dict_equal
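Usage sketch for the helper above (the metric dicts are invented): every key of d1 must also exist in d2, and the two values must agree within the relative tolerance.

import numpy as np

d1 = {"accuracy": 0.91, "f1": 0.8799999}
d2 = {"accuracy": 0.91, "f1": 0.88}
for k in d1:
    assert k in d2
    assert np.isclose(d1[k], d2[k], rtol=1e-3)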
21
0
1
9
tests/detectors/test_feature_detector.py
190,878
Feature/optional opencv (#1400) * Removed opencv dependency Now OpenCV is optional and detectors are smart to skip if cv2 could not be imported. Also refactored face detector a bit to make it more maintainable. Now thumbor can be installed with pip install thumbor pip install thumbor[all] pip install thumbor[opencv] pip install thumbor[tests]
thumbor
13
Python
21
test_feature_detector.py
async def test_should_detect_multiple_points(self):
    with open(
        abspath("./tests/fixtures/images/no_face.jpg"), "rb"
    ) as fixture:
        self.engine.load(fixture.read(), None)

    await FeatureDetector(self.context, 0, None).detect()
    detection_result = self.context.request.focal_points
    expect(len(detection_result)).to_be_greater_than(1)
    expect(detection_result[0].origin).to_equal("alignment")
d34fd16034e307b545c3e3adfa4d9d472a582cc6
82
https://github.com/thumbor/thumbor.git
84
async def test_should_detect_multiple_points(self): with open( abspath("./tests/fixtures/images/no_face.jpg"), "rb" ) as fixture: self.engine.load(fixture.read(), None) await FeatureDetector(self.context, 0, None).detect() detection_result = self.context.request.focal_points expect(len(detection_result)).to_be_greater_than(1) expect(dete
19
140
test_should_detect_multiple_points
32
0
3
11
keras/distribute/keras_correctness_test_base.py
270,391
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
11
Python
27
keras_correctness_test_base.py
def get_batch_size(global_batch_size, distribution):
    batch_size = global_batch_size
    # TODO(b/118776054): Use global batch size for Keras/DS support.
    use_per_core_batch_size = (
        distribution
        and not distributed_training_utils.global_batch_size_supported(
            distribution
        )
    )
    if use_per_core_batch_size:
        batch_size //= distribution.num_replicas_in_sync
    return batch_size
84afc5193d38057e2e2badf9c889ea87d80d8fbf
30
https://github.com/keras-team/keras.git
88
def get_batch_size(global_batch_size, distribution): batch_size = global_batch_size # TODO(b/118776054): Use global batch size for Keras/DS support. use_per_core_batch_size = ( distribution and not distributed_training_utils.global_batch_size_supported( distribution
8
54
get_batch_size
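The per-replica split in isolation (the numbers are invented): when the strategy cannot accept a global batch size, each replica receives an equal slice of it.

global_batch_size, num_replicas_in_sync = 64, 8
per_core_batch_size = global_batch_size // num_replicas_in_sync
assert per_core_batch_size == 8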
47
0
1
44
tests/crypto/test_keyring.py
250,533
Add missing type hints to tests. (#14687) Adds type hints to tests.metrics and tests.crypto.
synapse
9
Python
33
test_keyring.py
def test_get_multiple_keys_from_perspectives(self) -> None:
    fetcher = PerspectivesKeyFetcher(self.hs)

    SERVER_NAME = "server2"
    testkey1 = signedjson.key.generate_signing_key("ver1")
    testverifykey1 = signedjson.key.get_verify_key(testkey1)
    testverifykey1_id = "ed25519:ver1"

    testkey2 = signedjson.key.generate_signing_key("ver2")
    testverifykey2 = signedjson.key.get_verify_key(testkey2)
    testverifykey2_id = "ed25519:ver2"

    VALID_UNTIL_TS = 200 * 1000

    response1 = self.build_perspectives_response(
        SERVER_NAME,
        testkey1,
        VALID_UNTIL_TS,
    )
    response2 = self.build_perspectives_response(
        SERVER_NAME,
        testkey2,
        VALID_UNTIL_TS,
    )
a4ca770655a6b067468de3d507292ec133fdc5ca
292
https://github.com/matrix-org/synapse.git
211
def test_get_multiple_keys_from_perspectives(self) -> None: fetcher = PerspectivesKeyFetcher(self.hs) SERVER_NAME = "server2" testkey1 = signedjson.key.generate_signing_key("ver1") testverifykey1 = signedjson.key.get_verify_key(testkey1) testverifykey1_id = "ed25519:ver1" testkey2 = signedjson.key.generate_signing_key("ver2") testverifykey2 = signedjson.key.get_verify_key(testkey2) testverifykey2_id = "ed25519:ver2" VALID_UNTIL_TS = 200 * 1000 response1 = self.build_perspectives_response( SERVER_NAME, testkey1, VALID_UNTIL_TS, ) response2 = self.build_perspectives_response( SERVER_NAME, testkey2, VALID_UNTIL_TS, )
20
162
test_get_multiple_keys_from_perspectives
4
0
1
2
wagtail/documents/wagtail_hooks.py
74,922
Reformat with black
wagtail
8
Python
4
wagtail_hooks.py
def construct_admin_api(router):
    router.register_endpoint("documents", DocumentsAdminAPIViewSet)
d10f15e55806c6944827d801cd9c2d53f5da4186
13
https://github.com/wagtail/wagtail.git
6
def construct_admin_api(router): router.register_endpoint("documents",
4
23
construct_admin_api
43
0
1
16
gensim/test/test_word2vec.py
9,740
streamlining most_similar_cosmul and evaluate_word_analogies (#2656) * streamlining most_similar_cosmul * Fix PR requested changes and add unit test * fix merge artifacts Co-authored-by: n3hrox <n3hrox@gmail.com> Co-authored-by: Michael Penkov <m@penkov.dev>
gensim
11
Python
38
test_word2vec.py
def test_evaluate_word_analogies(self):
    model = word2vec.Word2Vec(LeeCorpus())
    score, sections = model.wv.evaluate_word_analogies(datapath('questions-words.txt'))
    score_cosmul, sections_cosmul = model.wv.evaluate_word_analogies(
        datapath('questions-words.txt'),
        similarity_function='most_similar_cosmul'
    )
    self.assertEqual(score, score_cosmul)
    self.assertEqual(sections, sections_cosmul)
    self.assertGreaterEqual(score, 0.0)
    self.assertLessEqual(score, 1.0)
    self.assertGreater(len(sections), 0)
    # Check that dict contains the right keys
    first_section = sections[0]
    self.assertIn('section', first_section)
    self.assertIn('correct', first_section)
    self.assertIn('incorrect', first_section)
ac3bbcdf87b263f79d5e19cce173e6c709a15f9d
127
https://github.com/RaRe-Technologies/gensim.git
170
def test_evaluate_word_analogies(self): model = word2vec.Word2Vec(LeeCorpus()) score, sections = model.wv.evaluate_word_analogies(datapath('questions-words.txt')) score_cosmul, sections_cosmul = model.wv.evaluate_word_analogies( datapath('questions-words.txt'), similarity_function='most_similar_cosmul' ) self.assertEqual(score, score_cosmul) self.assertEqual(sections, sections_cosmul) self.assertGreaterEqual(score, 0.0) self.assertLessEqual(score, 1.0) self.assertGreater(len(sections), 0) # Check that dict contains the right keys first_section = sections[0] self.assertIn('sec
21
206
test_evaluate_word_analogies
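Rough shape of one entry in `sections` as the assertions above imply (the contents are invented for illustration): a dict carrying the section name plus the analogies answered correctly and incorrectly.

first_section = {
    "section": "capital-common-countries",
    "correct": [("athens", "greece", "baghdad", "iraq")],
    "incorrect": [],
}
assert {"section", "correct", "incorrect"} <= set(first_section)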
21
0
1
17
keras/engine/data_adapter_test.py
271,155
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
9
Python
18
data_adapter_test.py
def test_shuffle_correctness(self):
    num_samples = 100
    batch_size = 32
    x = np.arange(num_samples)
    np.random.seed(99)
    adapter = self.adapter_cls(
        x, y=None, batch_size=batch_size, shuffle=True, epochs=2
    )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
125
https://github.com/keras-team/keras.git
73
def test_shuffle_correctness(self): num_samples = 100 batch_size = 32 x = np.arange(num_samples) np.random.seed(99)
14
78
test_shuffle_correctness
146
0
9
50
pandas/core/internals/blocks.py
164,658
REF: standardize patterns in Block Methods (#45840)
pandas
19
Python
97
blocks.py
def putmask(self, mask, new) -> list[Block]:
    orig_mask = mask
    values = cast(np.ndarray, self.values)
    mask, noop = validate_putmask(values.T, mask)
    assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame))

    if new is lib.no_default:
        new = self.fill_value

    new = self._standardize_fill_value(new)
    new = extract_array(new, extract_numpy=True)

    if noop:
        return [self]

    try:
        casted = np_can_hold_element(values.dtype, new)
        putmask_without_repeat(values.T, mask, casted)
        return [self]
    except LossySetitemError:
        if self.ndim == 1 or self.shape[0] == 1:
            # no need to split columns
            if not is_list_like(new):
                # using just new[indexer] can't save us the need to cast
                return self.coerce_to_target_dtype(new).putmask(mask, new)
            else:
                indexer = mask.nonzero()[0]
                nb = self.setitem(indexer, new[indexer])
                return [nb]
        else:
            is_array = isinstance(new, np.ndarray)

            res_blocks = []
            nbs = self._split()
            for i, nb in enumerate(nbs):
                n = new
                if is_array:
                    # we have a different value per-column
                    n = new[:, i : i + 1]

                submask = orig_mask[:, i : i + 1]
                rbs = nb.putmask(submask, n)
                res_blocks.extend(rbs)

            return res_blocks
21bbee62e371068896735946c3c0c2ab1f349fda
269
https://github.com/pandas-dev/pandas.git
656
def putmask(self, mask, new) -> list[Block]: orig_mask = mask values = cast(np.ndarray, self.values) mask, noop = validate_putmask(values.T, mask) assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) if new is lib.no_default: new = self.fill_value new = self._standardize_fill_value(new) new = extract_array(new, extract_numpy=True) if noop: return [self] try: casted = np_can_hold_element(values.dtype, new) putmask_without_repeat(values.T, mask, casted) return [self] except LossySetitemError: if self.ndim == 1 or self.shape[0] == 1: # no need to split columns if not is_list_like(new): # using just new[indexer] can't save us the need to cast return self.co
47
418
putmask
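A standalone illustration of the masked-write-with-fallback idea that the Block method wraps: write `new` where `mask` is True, and upcast the array first when its dtype cannot hold the new value losslessly. This is a simplified sketch with NumPy only, not the pandas internals themselves.

import numpy as np

values = np.array([1, 2, 3, 4])
mask = np.array([True, False, True, False])
new = 2.5
if np.can_cast(np.asarray(new).dtype, values.dtype):
    values[mask] = new                 # dtype can hold the value: write in place
else:
    values = values.astype(object)     # lossy case: change dtype before writing
    values[mask] = new
print(values)  # [2.5 2 2.5 4]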
14
0
2
5
homeassistant/components/overkiz/select.py
309,472
Add select entity to Overkiz integration (#62916)
core
10
Python
13
select.py
def current_option(self) -> str | None:
    if state := self.device.states.get(self.entity_description.key):
        return str(state.value)

    return None
5e3bfabfcfb2a65e68e14bd21bddb2c37df85b6c
37
https://github.com/home-assistant/core.git
46
def current_option(self) -> str | None: if state := self.device.states
10
61
current_option
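The same lookup-then-return pattern with a plain dict (the state class and key below are invented): the walrus assignment returns the value's string form when the key is present and falls through to None otherwise.

class _State:
    value = "pedestrian"

states = {"opening_mode": _State()}
if state := states.get("opening_mode"):
    print(str(state.value))  # -> pedestrian
else:
    print(None)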
51
1
2
9
tests/daemon/unit/stores/test_peapodstore.py
10,969
refactor: rename pod to deployment (#4230) * refactor: rename pod to deployment * style: fix overload and cli autocomplete * fix: undo daemon mistake * refactor: leftover cleanup * fix: more test fixes * fix: more fixes * fix: more fixes * fix: more fixes * fix: more tests * fix: fix more tests * refactor: fix more tests * refactor: more tests fixes * refactor: rename pea to pod * refactor: adjust docs * refactor: complete pea renaming * refactor: more fixes * fix: pea_type in k8s yamls * fix: adjust pod args name * refactor: rename peapods parser folder * fix: da init Co-authored-by: Jina Dev Bot <dev-bot@jina.ai>
jina
12
Python
38
test_peapodstore.py
async def test_podpod_store_multi_add(model, store, type, workspace):
    s = store()
    for j in range(5):
        id = DaemonID(f'j{type}')
        await s.add(id=id, params=model, workspace_id=workspace, ports={})
        assert len(s) == j + 1
        assert id in s
    await s.clear()
    assert not s


@pytest.mark.asyncio
@pytest.mark.parametrize(
    'model, store, id',
    [
        (PodModel(), PodStore, DaemonID(f'jpod')),
        # (PodModel(), PodStore, DaemonID(f'jpod')),
    ],
)
13edc16d806fb5d77a6849551178ccc75937f25f
@pytest.mark.asyncio
@pytest.mark.parametrize(
    'model, store, id',
    [
        (PodModel(), PodStore, DaemonID(f'jpod')),
        # (PodModel(), PodStore, DaemonID(f'jpod')),
    ],
)
75
https://github.com/jina-ai/jina.git
110
async def test_podpod_store_multi_add(model, store, type, workspace): s = store() for j in range(5): id = DaemonID(f'j{type}') await s.add(id=id, params=model, workspace_id=workspace, ports={}) assert len(s) == j + 1 assert id in s awa
22
173
test_podpod_store_multi_add
5
1
1
2
tests/integration/external_pod/test_external_pod.py
10,385
feat: export Flow into set of k8s yamls (#4089)
jina
8
Python
5
test_external_pod.py
def external_pod_shards_1(external_pod_shards_1_args):
    return Pod(external_pod_shards_1_args)


@pytest.fixture(scope='function')
6e9e7ef32f61cab04c6efc7a9f21659d26b50fdb
@pytest.fixture(scope='function')
10
https://github.com/jina-ai/jina.git
6
def external_pod_shards_1(external_pod_shards_1_args): return Pod(external_pod_shards_1_args) @pyte
6
34
external_pod_shards_1
126
0
1
50
tests/exchange/test_exchange.py
149,288
Update trading_fee naming
freqtrade
13
Python
71
test_exchange.py
def test_fetch_trading_fees(default_conf, mocker):
    api_mock = MagicMock()
    tick = {
        '1INCH/USDT:USDT': {
            'info': {'user_id': '',
                     'taker_fee': '0.0018',
                     'maker_fee': '0.0018',
                     'gt_discount': False,
                     'gt_taker_fee': '0',
                     'gt_maker_fee': '0',
                     'loan_fee': '0.18',
                     'point_type': '1',
                     'futures_taker_fee': '0.0005',
                     'futures_maker_fee': '0'},
            'symbol': '1INCH/USDT:USDT',
            'maker': 0.0,
            'taker': 0.0005
        },
        'ETH/USDT:USDT': {
            'info': {'user_id': '',
                     'taker_fee': '0.0018',
                     'maker_fee': '0.0018',
                     'gt_discount': False,
                     'gt_taker_fee': '0',
                     'gt_maker_fee': '0',
                     'loan_fee': '0.18',
                     'point_type': '1',
                     'futures_taker_fee': '0.0005',
                     'futures_maker_fee': '0'},
            'symbol': 'ETH/USDT:USDT',
            'maker': 0.0,
            'taker': 0.0005
        }
    }
    exchange_name = 'gateio'
    default_conf['dry_run'] = False
    default_conf['trading_mode'] = TradingMode.FUTURES
    default_conf['margin_mode'] = MarginMode.ISOLATED
    api_mock.fetch_trading_fees = MagicMock(return_value=tick)
    mocker.patch('freqtrade.exchange.Exchange.exchange_has', return_value=True)
    exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)

    assert '1INCH/USDT:USDT' in exchange._trading_fees
    assert 'ETH/USDT:USDT' in exchange._trading_fees
    assert api_mock.fetch_trading_fees.call_count == 1
    api_mock.fetch_trading_fees.reset_mock()
    ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name,
                           "fetch_trading_fees",
                           "fetch_trading_fees")
    api_mock.fetch_trading_fees = MagicMock(return_value={})
    exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)
    exchange.fetch_trading_fees()
    mocker.patch('freqtrade.exchange.Exchange.exchange_has', return_value=True)
    assert exchange.fetch_trading_fees() == {}
f5578aba48f174190697ac63908b3d3993c3a10c
292
https://github.com/freqtrade/freqtrade.git
673
def test_fetch_trading_fees(default_conf, mocker): api_mock = MagicMock() tick = { '1INCH/USDT:USDT': { 'info': {'user_id': '', 'taker_fee': '0.0018', 'maker_fee': '0.0018', 'gt_discount': False, 'gt
21
523
test_fetch_trading_fees
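The mocking pattern used in the test, reduced to its core (the fee payload is invented): a MagicMock stands in for the exchange call, returns canned fees, and records how often it was hit.

from unittest.mock import MagicMock

api_mock = MagicMock()
api_mock.fetch_trading_fees = MagicMock(return_value={"ETH/USDT:USDT": {"maker": 0.0, "taker": 0.0005}})
fees = api_mock.fetch_trading_fees()
assert api_mock.fetch_trading_fees.call_count == 1
assert fees["ETH/USDT:USDT"]["taker"] == 0.0005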
45
0
4
16
keras/legacy_tf_layers/base.py
274,228
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
13
Python
38
base.py
def add_loss(self, losses, inputs=None):
    previous_losses_length = len(self._losses)
    previous_callable_losses_length = len(self._callable_losses)
    super().add_loss(losses, inputs=inputs)
    if not tf.executing_eagerly():
        # TODO(fchollet): deprecate collection below.
        new_losses = self._losses[previous_losses_length:]
        new_callable_losses = self._callable_losses[
            previous_callable_losses_length:
        ]
        for regularizer in new_callable_losses:
            loss_tensor = regularizer()
            if loss_tensor is not None:
                new_losses.append(loss_tensor)
        _add_elements_to_collection(
            new_losses, tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
        )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
101
https://github.com/keras-team/keras.git
228
def add_loss(self, losses, inputs=None): previous_losses_length = len(self._losses) previous_callable_losses_length = len(self._callable_losses) super().add_loss(losses, inputs=inputs) if not tf.executing_eagerly(): # TODO(fchollet): deprecate collection below. new_losses = self._losses[previous_losses_length:] new_callable_losses = self._callable_losses[ previous_callable_losses_length: ] for regularizer in new_callable_losses: loss_tensor = regularizer() if loss_tensor is not N
22
160
add_loss
16
0
2
6
certbot-apache/certbot_apache/_internal/override_gentoo.py
186,675
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <ferrand.ad@gmail.com>
certbot
10
Python
14
override_gentoo.py
def parse_sysconfig_var(self) -> None:
    defines = apache_util.parse_define_file(self.apacheconfig_filep,
                                            "APACHE2_OPTS")
    for k, v in defines.items():
        self.variables[k] = v
7d9e9a49005de7961e84d2a7c608db57dbab3046
39
https://github.com/certbot/certbot.git
95
def parse_sysconfig_var(self) -> None: defines = apache_util.parse_define_file(self.apacheconfig_filep, "APACHE2_OPTS")
10
65
parse_sysconfig_var
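A rough stand-in for apache_util.parse_define_file (the file format is assumed for illustration): pull the names passed via -D out of a Gentoo-style APACHE2_OPTS line into a dict, then copy them into a variables mapping as the method above does.

def parse_define_line(line):
    # e.g. APACHE2_OPTS="-D DEFAULT_VHOST -D SSL"
    value = line.split("=", 1)[1].strip().strip('"')
    flags = value.split()
    return {flags[i + 1]: "" for i, tok in enumerate(flags) if tok == "-D" and i + 1 < len(flags)}

variables = {}
for k, v in parse_define_line('APACHE2_OPTS="-D DEFAULT_VHOST -D SSL"').items():
    variables[k] = v
print(variables)  # {'DEFAULT_VHOST': '', 'SSL': ''}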
63
0
7
17
python3.10.4/Lib/distutils/command/build_clib.py
222,661
add python 3.10.4 for windows
XX-Net
12
Python
46
build_clib.py
def run(self):
    if not self.libraries:
        return

    # Yech -- this is cut 'n pasted from build_ext.py!
    from distutils.ccompiler import new_compiler
    self.compiler = new_compiler(compiler=self.compiler,
                                 dry_run=self.dry_run,
                                 force=self.force)
    customize_compiler(self.compiler)

    if self.include_dirs is not None:
        self.compiler.set_include_dirs(self.include_dirs)
    if self.define is not None:
        # 'define' option is a list of (name,value) tuples
        for (name, value) in self.define:
            self.compiler.define_macro(name, value)
    if self.undef is not None:
        for macro in self.undef:
            self.compiler.undefine_macro(macro)

    self.build_libraries(self.libraries)
8198943edd73a363c266633e1aa5b2a9e9c9f526
126
https://github.com/XX-net/XX-Net.git
282
def run(self): if not self.libraries: return # Yech -- this is cut 'n pasted from build_ext.py! from distutils.ccompiler import new_compiler self.compiler = new_compiler(compiler=self.comp
20
195
run
27
0
1
7
pandas/tests/io/excel/test_readers.py
171,269
STYLE: fix pylint reimported warnings (#49645) * STYLE: fix pylint reimported warnings * fixup! STYLE: fix pylint reimported warnings
pandas
10
Python
21
test_readers.py
def test_read_from_pathlib_path(self, read_ext):
    # GH12655
    str_path = "test1" + read_ext
    expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0)

    path_obj = Path("test1" + read_ext)
    actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0)

    tm.assert_frame_equal(expected, actual)
289f32df5a565848adbc0adc8949fa4066542316
64
https://github.com/pandas-dev/pandas.git
68
def test_read_from_pathlib_path(self, read_ext): # GH12655 str_path = "test1" + read_ext expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0)
14
99
test_read_from_pathlib_path
272
0
1
103
.venv/lib/python3.8/site-packages/pip/_vendor/idna/uts46data.py
62,759
upd; format
transferlearning
8
Python
171
uts46data.py
def _seg_5(): return [ (0x20D, 'V'), (0x20E, 'M', 'ȏ'), (0x20F, 'V'), (0x210, 'M', 'ȑ'), (0x211, 'V'), (0x212, 'M', 'ȓ'), (0x213, 'V'), (0x214, 'M', 'ȕ'), (0x215, 'V'), (0x216, 'M', 'ȗ'), (0x217, 'V'), (0x218, 'M', 'ș'), (0x219, 'V'), (0x21A, 'M', 'ț'), (0x21B, 'V'), (0x21C, 'M', 'ȝ'), (0x21D, 'V'), (0x21E, 'M', 'ȟ'), (0x21F, 'V'), (0x220, 'M', 'ƞ'), (0x221, 'V'), (0x222, 'M', 'ȣ'), (0x223, 'V'), (0x224, 'M', 'ȥ'), (0x225, 'V'), (0x226, 'M', 'ȧ'), (0x227, 'V'), (0x228, 'M', 'ȩ'), (0x229, 'V'), (0x22A, 'M', 'ȫ'), (0x22B, 'V'), (0x22C, 'M', 'ȭ'), (0x22D, 'V'), (0x22E, 'M', 'ȯ'), (0x22F, 'V'), (0x230, 'M', 'ȱ'), (0x231, 'V'), (0x232, 'M', 'ȳ'), (0x233, 'V'), (0x23A, 'M', 'ⱥ'), (0x23B, 'M', 'ȼ'), (0x23C, 'V'), (0x23D, 'M', 'ƚ'), (0x23E, 'M', 'ⱦ'), (0x23F, 'V'), (0x241, 'M', 'ɂ'), (0x242, 'V'), (0x243, 'M', 'ƀ'), (0x244, 'M', 'ʉ'), (0x245, 'M', 'ʌ'), (0x246, 'M', 'ɇ'), (0x247, 'V'), (0x248, 'M', 'ɉ'), (0x249, 'V'), (0x24A, 'M', 'ɋ'), (0x24B, 'V'), (0x24C, 'M', 'ɍ'), (0x24D, 'V'), (0x24E, 'M', 'ɏ'), (0x24F, 'V'), (0x2B0, 'M', 'h'), (0x2B1, 'M', 'ɦ'), (0x2B2, 'M', 'j'), (0x2B3, 'M', 'r'), (0x2B4, 'M', 'ɹ'), (0x2B5, 'M', 'ɻ'), (0x2B6, 'M', 'ʁ'), (0x2B7, 'M', 'w'), (0x2B8, 'M', 'y'), (0x2B9, 'V'), (0x2D8, '3', ' ̆'), (0x2D9, '3', ' ̇'), (0x2DA, '3', ' ̊'), (0x2DB, '3', ' ̨'), (0x2DC, '3', ' ̃'), (0x2DD, '3', ' ̋'), (0x2DE, 'V'), (0x2E0, 'M', 'ɣ'), (0x2E1, 'M', 'l'), (0x2E2, 'M', 's'), (0x2E3, 'M', 'x'), (0x2E4, 'M', 'ʕ'), (0x2E5, 'V'), (0x340, 'M', '̀'), (0x341, 'M', '́'), (0x342, 'V'), (0x343, 'M', '̓'), (0x344, 'M', '̈́'), (0x345, 'M', 'ι'), (0x346, 'V'), (0x34F, 'I'), (0x350, 'V'), (0x370, 'M', 'ͱ'), (0x371, 'V'), (0x372, 'M', 'ͳ'), (0x373, 'V'), (0x374, 'M', 'ʹ'), (0x375, 'V'), (0x376, 'M', 'ͷ'), (0x377, 'V'), ]
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
729
https://github.com/jindongwang/transferlearning.git
577
def _seg_5(): return [ (0x20D, 'V'), (0x20E, 'M', 'ȏ'), (0x20F, 'V'), (0x210, 'M', 'ȑ'), (0x211, 'V'), (0x212, 'M', 'ȓ'), (0x213, 'V'), (0x214, 'M', 'ȕ'), (0x215, 'V'), (0x216, 'M', 'ȗ'), (0x217, 'V'), (0x218, 'M', 'ș'), (0x219, 'V'), (0x21A, 'M', 'ț'), (0x21B, 'V'), (0x21C, 'M', 'ȝ'), (0x21D, 'V'), (0x21E, 'M', 'ȟ'), (0x21F, 'V'), (0x220, 'M', 'ƞ'), (0x221, 'V'), (0x222, 'M', 'ȣ'), (0x223, 'V'), (0x224, 'M', 'ȥ'), (0x225, 'V'), (0x226, 'M', 'ȧ'), (0x227, 'V'), (0x228, 'M', 'ȩ'), (0x229, 'V'), (0x22A, 'M', 'ȫ'), (0x22B, 'V'), (0x22C, 'M', 'ȭ'), (0x22D, 'V'), (0x22E, 'M', 'ȯ'), (0x22F, 'V'), (0x230, 'M', 'ȱ'), (0x231, 'V'), (0x232, 'M', 'ȳ'), (0x233, 'V'), (0x23A, 'M', 'ⱥ'), (0x23B, 'M', 'ȼ'), (0x23C, 'V'), (0x23D, 'M', 'ƚ'), (0x23E, 'M', 'ⱦ'), (0x23F, 'V'), (0x241, 'M', 'ɂ'), (0x242, 'V'), (0x243, 'M', 'ƀ'), (0x244, 'M', 'ʉ'), (0x245, 'M', 'ʌ'), (0x246, 'M', 'ɇ'), (0x247, 'V'), (0x248, 'M', 'ɉ'), (0x249, 'V'), (0x24A, 'M', 'ɋ'), (0x24B, 'V'), (0x24C, 'M', 'ɍ'), (0x24D, 'V'), (0x24E, 'M', 'ɏ'), (0x24F, 'V'), (0x2B0, 'M', 'h'), (0x2B1, 'M', 'ɦ'), (0x2B2, 'M', 'j'), (0x2B3, 'M', '
1
1,157
_seg_5
54
0
4
21
keras/layers/rnn/gru_test.py
280,211
tf.cond optimization Reformatting Disabling a test that fails on fallback path
keras
13
Python
43
gru_test.py
def _test_runtime_with_model(self, model):
    (x_train, y_train), _ = test_utils.get_test_data(
        train_samples=self.batch,
        test_samples=0,
        input_shape=(self.timestep, self.input_shape),
        num_classes=self.output_shape,
    )
    y_train = np_utils.to_categorical(y_train, self.output_shape)

    model.compile(optimizer="sgd", loss=["categorical_crossentropy", None])

    existing_loss = 0
    for _ in range(self.epoch):
        history = model.fit(x_train, y_train)
        loss_value = history.history["loss"][0]

        self.assertNotEqual(existing_loss, loss_value)
        existing_loss = loss_value

    _, runtime_value = model.predict(x_train)
    if not tf.sysconfig.get_build_info()["is_rocm_build"]:
        if tf.test.is_gpu_available():
            self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_GPU)
        else:
            self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)
6fed9116cb32d5cd9f10cfa38062cae4a27e4743
181
https://github.com/keras-team/keras.git
249
def _test_runtime_with_model(self, model): (x_train, y_train), _ = test_utils.get_test_data( train_samples=self.batch, test_samples=0, input_shape=(self.timestep, self.input_shape), num_classes=self.output_shape, ) y_train = np_utils.to_categorical(y_train, self.output_shape) model.compile(optimizer="sgd", loss=["categorical_crossentropy", None]) existing_loss = 0 for _ in range(self.epoch): history = model.fit(x_train, y_train) loss_value = history.history["loss"][0] self.assertNotEqual(existing_loss, loss_value) existing_loss = loss_value _, runtime_value = model.predict(x_train) if not
38
279
_test_runtime_with_model