Dataset schema. Each column is listed with its type and observed range: for int64 columns the range is over values, for string columns it is over string lengths.

  n_words          int64    3 to 1.95k
  n_ast_errors     int64    0 to 2
  complexity       int64    1 to 151
  nloc             int64    2 to 546
  path             string   lengths 8 to 125
  id               int64    280 to 339k
  commit_message   string   lengths 3 to 18.1k
  repo             string   lengths 3 to 28
  ast_levels       int64    4 to 28
  language         string   1 distinct value (Python)
  vocab_size       int64    3 to 677
  file_name        string   lengths 5 to 67
  code             string   lengths 101 to 24k
  commit_id        string   lengths 40 to 40
  ast_errors       string   lengths 0 to 2.76k
  token_counts     int64    7 to 3.77k
  url              string   lengths 31 to 61
  n_whitespaces    int64    4 to 13.9k
  random_cut       string   lengths 21 to 13.9k
  n_identifiers    int64    1 to 157
  n_ast_nodes      int64    10 to 3.6k
  fun_name         string   lengths 3 to 72
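The records that follow list one value per line in the column order given above; an empty field (typically ast_errors) appears to be skipped rather than shown as a blank line. To make the AST-derived columns such as nloc, n_ast_nodes, ast_levels, n_identifiers and token_counts more concrete, here is a minimal sketch of how comparable metrics could be computed for a Python snippet using only the standard library. The metric definitions below are illustrative assumptions and are not guaranteed to match how this dataset was actually generated.

```python
import ast
import io
import tokenize


def code_metrics(code: str) -> dict:
    """Illustrative stand-ins for columns like nloc, n_ast_nodes,
    ast_levels, n_identifiers and token_counts (assumed definitions)."""
    tree = ast.parse(code)

    # Count every AST node and track the deepest nesting level reached.
    n_ast_nodes = 0
    max_depth = 0

    def visit(node, depth):
        nonlocal n_ast_nodes, max_depth
        n_ast_nodes += 1
        max_depth = max(max_depth, depth)
        for child in ast.iter_child_nodes(node):
            visit(child, depth + 1)

    visit(tree, 1)

    # Distinct names referenced as plain identifiers (ast.Name nodes).
    identifiers = {node.id for node in ast.walk(tree) if isinstance(node, ast.Name)}

    # Non-blank, non-comment lines as a crude "lines of code" count.
    nloc = sum(
        1
        for line in code.splitlines()
        if line.strip() and not line.strip().startswith("#")
    )

    # Lexical tokens, ignoring pure layout/bookkeeping tokens.
    layout = {
        tokenize.NL,
        tokenize.NEWLINE,
        tokenize.INDENT,
        tokenize.DEDENT,
        tokenize.ENCODING,
        tokenize.ENDMARKER,
    }
    tokens = [
        tok
        for tok in tokenize.generate_tokens(io.StringIO(code).readline)
        if tok.type not in layout
    ]

    return {
        "nloc": nloc,
        "n_ast_nodes": n_ast_nodes,
        "ast_levels": max_depth,
        "n_identifiers": len(identifiers),
        "token_counts": len(tokens),
    }


if __name__ == "__main__":
    sample = "def add(a, b):\n    return a + b\n"
    print(code_metrics(sample))
```

For a toy snippet such as `def add(a, b): return a + b` this prints a small dictionary of counts; the rows below were presumably produced by a more elaborate pipeline, so the numbers here are only meant to convey what each column measures.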
98
0
4
18
python/ray/train/gbdt_trainer.py
125,552
[air] remove unnecessary logs + improve repr for result (#26906)
ray
18
Python
78
gbdt_trainer.py
def preprocess_datasets(self) -> None: super().preprocess_datasets() # XGBoost/LightGBM-Ray requires each dataset to have at least as many # blocks as there are workers. # TODO: Move this logic to the respective libraries for dataset_key, dataset in self.datasets.items(): if dataset.num_blocks() < self._ray_params.num_actors: if dataset.size_bytes() > _WARN_REPARTITION_THRESHOLD: warnings.warn( f"Dataset '{dataset_key}' has {dataset.num_blocks()} blocks, " f"which is less than the `num_workers` " f"{self._ray_params.num_actors}. " f"This dataset will be automatically repartitioned to " f"{self._ray_params.num_actors} blocks. You can disable " "this error message by partitioning the dataset " "to have blocks >= number of workers via " "`dataset.repartition(num_workers)`." ) self.datasets[dataset_key] = dataset.repartition( self._ray_params.num_actors )
d79431e32cffbf3f86da5f7417697dc8edd1da3f
84
https://github.com/ray-project/ray.git
429
def preprocess_datasets(self) -> None: super().preprocess_datasets() # XGBoost/LightGBM-Ray requires each dataset to have at least as many # blocks as there are workers. # TODO: Move this logic to the respective libraries for dataset_key, dataset in self.datasets.items(): if dataset.num_blocks() < self._ray_params.num_actors: if dataset.size_bytes() > _WARN_REPARTITION_THRESHOLD: warnings.warn( f"Dataset '{dataset_key}' has {dataset.num_blocks()} blocks, " f"which is less than the `num_workers` " f"{self._ray_para
15
179
preprocess_datasets
73
0
2
15
pandas/tests/io/test_sql.py
162,993
ENH: to_sql returns rowcount (#45137)
pandas
13
Python
54
test_sql.py
def test_datetime_NaT(self): df = DataFrame( {"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)} ) df.loc[1, "A"] = np.nan assert df.to_sql("test_datetime", self.conn, index=False) == 3 # with read_table -> type information from schema used result = sql.read_sql_table("test_datetime", self.conn) tm.assert_frame_equal(result, df) # with read_sql -> no type information -> sqlite has no native result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn) if self.flavor == "sqlite": assert isinstance(result.loc[0, "A"], str) result["A"] = to_datetime(result["A"], errors="coerce") tm.assert_frame_equal(result, df) else: tm.assert_frame_equal(result, df)
3dfed3fcd552dcbf4daf7f78c82a87638f896512
149
https://github.com/pandas-dev/pandas.git
204
def test_datetime_NaT(self): df = DataFrame( {"A": date_range("2013-01-01 09:00:00", periods=3),
24
244
test_datetime_NaT
98
0
2
20
tests/strategy/test_interface.py
149,926
Enhance hyperoptable strategy to test instance parameters
freqtrade
12
Python
70
test_interface.py
def test_auto_hyperopt_interface(default_conf): default_conf.update({'strategy': 'HyperoptableStrategy'}) PairLocks.timeframe = default_conf['timeframe'] strategy = StrategyResolver.load_strategy(default_conf) strategy.ft_bot_start() with pytest.raises(OperationalException): next(strategy.enumerate_parameters('deadBeef')) assert strategy.buy_rsi.value == strategy.buy_params['buy_rsi'] # PlusDI is NOT in the buy-params, so default should be used assert strategy.buy_plusdi.value == 0.5 assert strategy.sell_rsi.value == strategy.sell_params['sell_rsi'] assert repr(strategy.sell_rsi) == 'IntParameter(74)' # Parameter is disabled - so value from sell_param dict will NOT be used. assert strategy.sell_minusdi.value == 0.5 all_params = strategy.detect_all_parameters() assert isinstance(all_params, dict) assert len(all_params['buy']) == 2 assert len(all_params['sell']) == 2 # Number of Hyperoptable parameters assert all_params['count'] == 6 strategy.__class__.sell_rsi = IntParameter([0, 10], default=5, space='buy') with pytest.raises(OperationalException, match=r"Inconclusive parameter.*"): [x for x in strategy.detect_parameters('sell')]
5bf021be2e8f1479753e66573575fa7cde00a2b6
196
https://github.com/freqtrade/freqtrade.git
171
def test_auto_hyperopt_interface(default_conf): default_conf.update({'strategy': 'HyperoptableStrategy'}) PairLocks.timeframe = default_conf['timeframe'] strategy = StrategyResolver.load_strategy(default_conf) strategy.ft_bot_start() with pytest.raises(OperationalException): next(strategy.enumerate_parameters('deadBeef')) assert strategy.buy_rsi.value == strategy.buy_params['buy_rsi'] # PlusDI is NOT in the buy-params, so default should be use
34
325
test_auto_hyperopt_interface
16
0
2
3
fastai/general_optimizer.py
190,366
Upgrading to support latest Pytorch version
DeOldify
12
Python
16
general_optimizer.py
def _get_val3(self, state, val, param): v = val.view(val.size(0), -1).mean(1) return state.add_(1-param, v) if self.decay else state.add_(v)
4fc3616712edb19179b17dd270ad6cf63abf99c2
54
https://github.com/jantic/DeOldify.git
30
def _get_val3(self, state, val, param): v = val.view(val.size(0), -1).
11
80
_get_val3
81
0
1
21
tests/admin_views/tests.py
207,691
Refs #33476 -- Reformatted code with Black.
django
11
Python
61
tests.py
def test_pk_hidden_fields(self): story1 = Story.objects.create( title="The adventures of Guido", content="Once upon a time in Djangoland..." ) story2 = Story.objects.create( title="Crouching Tiger, Hidden Python", content="The Python was sneaking into...", ) response = self.client.get(reverse("admin:admin_views_story_changelist")) # Only one hidden field, in a separate place than the table. self.assertContains(response, 'id="id_form-0-id"', 1) self.assertContains(response, 'id="id_form-1-id"', 1) self.assertContains( response, '<div class="hiddenfields">\n' '<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">' '<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id">\n</div>' % (story2.id, story1.id), html=True, ) self.assertContains(response, '<td class="field-id">%d</td>' % story1.id, 1) self.assertContains(response, '<td class="field-id">%d</td>' % story2.id, 1)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
125
https://github.com/django/django.git
271
def test_pk_hidden_fields(self): story1 = Story.objects.create( title="The adventures of Guido", content="Once upon a time in Djangoland..." ) story2 = Story.object
16
213
test_pk_hidden_fields
13
0
1
6
py/visdom/__init__.py
106,841
apply black py to all python files
visdom
11
Python
13
__init__.py
def get_window_data(self, win=None, env=None): return self._send( msg={"win": win, "eid": env}, endpoint="win_data", create=False, )
5b8b7f267cfaf76a2a39a727ef31a62b3909a093
40
https://github.com/fossasia/visdom.git
67
def get_window_data(self, win=None, env=None):
8
65
get_window_data
43
0
7
14
nuitka/Caching.py
178,646
Optimization: Make experimental caching bytecode demotion work.
Nuitka
14
Python
33
Caching.py
def getModuleImportableFilesHash(full_name): package_name = full_name.getPackageName() paths = getPackageSearchPath(None) if package_name is not None: paths += getPackageSearchPath(package_name) all_suffixes = getAllModuleSuffixes() result_hash = Hash() for path in paths: if not os.path.isdir(path): continue for fullname, filename in listDir(path): if isPackageDir(fullname) or filename.endswith(all_suffixes): result_hash.updateFromValues(filename, b"\0") return result_hash.asHexDigest()
840959fbec6d897aa7e51f63e1c34e46402ced8b
96
https://github.com/Nuitka/Nuitka.git
125
def getModuleImportableFilesHash(full_name): package_name = full_name.getPackageName() paths = getPackageSearchPath(None) if package_name is not None: paths += getPackageSearchPath(package_name) all_suffixes = getAllModuleSuffixes() result_hash = Hash() for path in paths: if not os.path.isdir(path): continue for fullname, filename in listDir(path): if isPackageDir(fullname) or filename.endswith(all_suffixes): result_hash.updateFromValues(filename, b"\0") return result_hash.asHexDig
20
159
getModuleImportableFilesHash
11
0
1
5
homeassistant/components/lg_netcast/media_player.py
305,410
Improve entity type hints [l] (#77655)
core
7
Python
10
media_player.py
def media_pause(self) -> None: self._playing = False self._state = STATE_PAUSED self.send_command(34)
d1ecd74a1a153b85b829acf45b5c6a5ea79df5c1
24
https://github.com/home-assistant/core.git
39
def media_pause(self) -> None: self._playing = False self._state = STATE_PAUSED self
6
42
media_pause
21
0
2
9
timm/models/vision_transformer.py
331,731
Move DeiT to own file, vit getting crowded. Working towards fixing #1029, make pooling interface for transformers and mlp closer to convnets. Still working through some details...
pytorch-image-models
17
Python
18
vision_transformer.py
def _reset_representation(self, representation_size): self.representation_size = representation_size if self.representation_size: self.pre_logits = nn.Sequential(OrderedDict([ ('fc', nn.Linear(self.embed_dim, self.representation_size)), ('act', nn.Tanh()) ])) else: self.pre_logits = nn.Identity()
5f81d4de234f579bdc988e8346da14b37a3af160
68
https://github.com/huggingface/pytorch-image-models.git
104
def _reset_representation(self, representation_size): self.representation_size = representation_size if self.representation_size: self.pre_logits = nn.Sequential(OrderedDict([
11
109
_reset_representation
15
0
1
3
tests/unit/bokeh/core/property/test_instance.py
212,206
Add Init signatures to Bokeh models (#12035) * Add signatures to Bokeh Model initializers * use explicit type for override default * move InstanceDefault to bokeh.core.properties * enable assertions
bokeh
10
Python
15
test_instance.py
def test___repr__(self) -> None: m = bcpi.InstanceDefault(_TestModel, x=10, z=[10]) assert repr(m) == "<Instance: _TestModel(x=10, z=[10])>"
1b3e6acd6eebd352106cc5ecf5e12dbf90e0607c
32
https://github.com/bokeh/bokeh.git
28
def test___repr__(self) -> None: m =
9
51
test___repr__
105
1
1
36
dash/testing/plugin.py
40,140
Revert "Remove percynofinalize." This reverts commit 8d2b1d3f5eab35f88eba46f6e96de5f484857513.
dash
10
Python
80
plugin.py
def pytest_addoption(parser): dash = parser.getgroup("Dash", "Dash Integration Tests") dash.addoption( "--webdriver", choices=("Chrome", "Firefox"), default="Chrome", help="Name of the selenium driver to use", ) dash.addoption( "--remote", action="store_true", help="instruct pytest to use selenium grid" ) dash.addoption( "--remote-url", action="store", default=SELENIUM_GRID_DEFAULT, help="set a different selenium grid remote url if other than default", ) dash.addoption( "--headless", action="store_true", help="set this flag to run in headless mode" ) dash.addoption( "--percy-assets", action="store", default="tests/assets", help="configure how Percy will discover your app's assets", ) dash.addoption( "--nopercyfinalize", action="store_false", help="set this flag to control percy finalize at CI level", ) dash.addoption( "--pause", action="store_true", help="pause using pdb after opening the test app, so you can interact with it", ) @pytest.mark.tryfirst
d2bd5aa361a1fdff7e8fa3e29568fc295c40489e
@pytest.mark.tryfirst
134
https://github.com/plotly/dash.git
288
def pytest_addoption(parser): dash = parser.getgroup("Dash", "Dash Integration Tests") dash.addoption( "--webdriver", choices=("Chrome", "Firefox"), default="Chrome", help="Name of the selenium driver to use", ) dash.addoption( "--remote", action="store_true", help="instruct pytest to use selenium grid" ) dash.addoption( "--remote-url", action="store", default=SELENIUM_GRID_DEFAULT, help="set a different selenium grid remote url if other than default", ) dash.addoption( "--headless", action="store_true", help="set this flag to run in headless mode" ) dash.addoption( "--percy-assets", action="store", default="tests/assets", help="configure how Percy will
13
251
pytest_addoption
22
0
2
15
bokeh/colors/color.py
212,597
Combine alpha values by multiplying together (#12283) * Combine multiple alpha in color2rgba * Visual integration tests * Update varying_alpha_palette to combine alpha * Include alpha in RGB.to_hex() * Improvements for mypy * More improvements for mypy * Remove unused combine_alpha() from bokehjs
bokeh
13
Python
18
color.py
def to_hex(self) -> str: if self.a < 1.0: return "#%02X%02X%02X%02X" % (self.r, self.g, self.b, round(self.a*255)) else: return "#%02X%02X%02X" % (self.r, self.g, self.b)
91fbc852c5ed7245e661e3075310f79246aac09a
60
https://github.com/bokeh/bokeh.git
65
def to_hex(self) -> str: if self.a < 1.0: return "#%02X%02X%02X%02X" % (self.r, self.g, self.b, round(self.a*255)
8
92
to_hex
26
0
1
7
tests/aggregation/tests.py
200,894
Refs #33476 -- Reformatted code with Black.
django
11
Python
24
tests.py
def test_sum_distinct_aggregate(self): authors = Author.objects.filter(book__in=[self.b5, self.b6]) self.assertEqual(authors.count(), 3) distinct_authors = authors.distinct() self.assertEqual(distinct_authors.count(), 2) # Selected author ages are 57 and 46 age_sum = distinct_authors.aggregate(Sum("age")) self.assertEqual(age_sum["age__sum"], 103)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
79
https://github.com/django/django.git
82
def test_sum_distinct_aggregate(self): authors = Author.objects.filter(book__in=[self.b5, self.b6]) self.assertEqual(authors.count(), 3)
16
132
test_sum_distinct_aggregate
34
0
1
16
src/emailservice/demo_pb2_grpc.py
190,745
chore(deps): manual upgrade of dependencies (#1396) * chore(deps): manual upgrade of dependencies accumulated upgrades from renovate PRs #997, #1094, #1095, #1193, #1379, #1384, #1387, #1388, #1389, * chore(deps): fix dependencies due to requests constraint the charset-normalizer is rolled back to lates of major version 2 (2.1.1). add importlib-metadata dependency. update opentelemetry-* to the version supported by 1.15.0 of the SDK. rollback to docker 3.10 for email & recommendation cause profiler package does not support python 3.11 regenerate email and recommendation svs protobuf * chore(deps): adding changes from #1402 and #1403
microservices-demo
13
Python
31
demo_pb2_grpc.py
def add_CurrencyServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'GetSupportedCurrencies': grpc.unary_unary_rpc_method_handler( servicer.GetSupportedCurrencies, request_deserializer=demo__pb2.Empty.FromString, response_serializer=demo__pb2.GetSupportedCurrenciesResponse.SerializeToString, ), 'Convert': grpc.unary_unary_rpc_method_handler( servicer.Convert, request_deserializer=demo__pb2.CurrencyConversionRequest.FromString, response_serializer=demo__pb2.Money.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'hipstershop.CurrencyService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API.
c6105dbd7210286dde07aa0a09381bf99840acca
86
https://github.com/GoogleCloudPlatform/microservices-demo.git
214
def add_CurrencyServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'GetSupportedCurrencies': grpc.unary_unary_rpc_method_handler( servicer.GetSupportedCurrencies, request_deserializer=demo__pb2.Empty.FromString, response_serializer=demo__pb2.GetSupported
20
132
add_CurrencyServiceServicer_to_server
91
0
4
33
mindsdb/integrations/mysql/mysql.py
114,048
fix dtypes casting
mindsdb
15
Python
68
mysql.py
def _to_mysql_table(self, dtype_dict, predicted_cols, columns): subtype_map = { dtype.integer: 'int', dtype.float: 'double', dtype.binary: 'bool', dtype.date: 'Date', dtype.datetime: 'Datetime', dtype.binary: 'VARCHAR(500)', dtype.categorical: 'VARCHAR(500)', dtype.tags: 'VARCHAR(500)', dtype.image: 'VARCHAR(500)', dtype.video: 'VARCHAR(500)', dtype.audio: 'VARCHAR(500)', dtype.short_text: 'VARCHAR(500)', dtype.rich_text: 'VARCHAR(500)', dtype.quantity: 'VARCHAR(500)', dtype.num_array: 'VARCHAR(500)', dtype.cat_array: 'VARCHAR(500)', dtype.num_tsarray: 'VARCHAR(500)', dtype.cat_tsarray: 'VARCHAR(500)', 'default': 'VARCHAR(500)' } column_declaration = [] for name in columns: try: col_subtype = dtype_dict[name] new_type = subtype_map.get(col_subtype, subtype_map.get('default')) column_declaration.append(f' `{name}` {new_type} ') if name in predicted_cols: column_declaration.append(f' `{name}_original` {new_type} ') except Exception as e: log.error(f'Error: can not determine mysql data type for column {name}: {e}') return column_declaration
80d671fae9c05521009615a2bfff1b760a98debd
191
https://github.com/mindsdb/mindsdb.git
450
def _to_mysql_table(self, dtype_dict, predicted_cols, columns): subtype_map = { dtype.integer: 'int', dtype.float: 'double', dtype.binary: 'bool', dtype.date: 'Date', dtype.datetime: 'Datetime', dtype.binary: 'VARCHAR(500)', dtype.categorical: 'VARCHAR(500)', dtype.tags: 'VARCHAR(500)', dtype.image: 'VARCHAR(500)', dtype.video: 'VARCHAR(500)', dtype.audio: 'VARCHAR(500)', dtype.short_text: 'VARCHAR(500)', dtype.rich_text: 'VARCHAR(500)', dtype.quantity: 'VARCHAR(500)', dtype.num_array: 'VARCHAR(500)', dtype.cat_array: 'VARCHAR(500)', dtype.num_tsarray: 'VARCHAR(500)', dtype.cat_tsarray: 'VARCHAR(500)', 'default': 'VARCHAR(500)' } column_declaration = [] for name in columns: try: col_subtype = dtype_dict[name] new_type = subtype_map.get(col_subtype, subtype_map.get('default')) column_declaration.append(f' `{name}` {new_type} ') if name in predicted_cols: column_declaration.append(f' `{name}_
34
342
_to_mysql_table
33
1
1
23
saleor/webhook/observability/tests/test_obfuscation.py
27,579
Observability reporter (#9803) * Initial commit * Add observability celery beat task * Add observability_reporter_task and observability_send_events * Convert payload to camel case * Add fakeredis to dev dependencies * Add redis buffer tests * Refactor buffer * Update * Optimize buffer * Add tests * Add types-redis to dev dependencies * Refactor * Fix after rebase * Refactor opentracing * Add opentracing to observability tasks * Add more tests * Fix buffer fixtures * Report dropped events * Fix buffer tests * Refactor get_buffer * Refactor unit tests * Set Redis connection client_name * Refactor redis tests * Fix test_get_or_create_connection_pool * Fix JsonTruncText comparison * Add more generate_event_delivery_attempt_payload tests
saleor
11
Python
29
test_obfuscation.py
def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory): query = result = {"data": "result"} sensitive_fields = {"Product": {"name"}} operation_result = gql_operation_factory(query, result=result) anonymize_gql_operation_response(operation_result, sensitive_fields) assert operation_result.result["data"] == MASK @pytest.mark.parametrize( "sensitive_fields", [ {"NonExistingType": {}}, {"Product": {"nonExistingField"}}, {"Node": {"id"}}, ], )
7ea7916c65357741c3911e307acb58d547a5e91a
@pytest.mark.parametrize( "sensitive_fields", [ {"NonExistingType": {}}, {"Product": {"nonExistingField"}}, {"Node": {"id"}}, ], )
49
https://github.com/saleor/saleor.git
79
def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory): query = result = {"data": "result"} sensitive_fields = {"Product": {"name"}} operation_result = gql_operation_factory(query, result=result) anonymize_gql_operation_response(operation_result, sensitive_fields) assert operation_result.result["data"] == MASK @pytest.mark.parametrize( "sensitive_fields", [ {"NonExistingType": {}}, {"Product": {"nonExistingField"}}, {"Node": {"id"}}, ], )
11
152
test_anonymize_gql_operation_response_with_fragment_spread
13
0
2
5
awx/main/analytics/subsystem_metrics.py
81,254
Add subsystem metrics for task manager
awx
9
Python
13
subsystem_metrics.py
def observe(self, field, value): self.METRICS[field].observe(value) self.metrics_have_changed = True if self.auto_pipe_execute is True: self.pipe_execute()
2f82b757483cf67829a8c0ed843b51d126ec658e
37
https://github.com/ansible/awx.git
44
def observe(self, field, value): self.METRICS[field].observe(value) self.metrics_have_changed = True if self.auto_pipe_execute is True: self.pipe_execute()
8
58
observe
83
0
7
30
ppocr/postprocess/table_postprocess.py
23,988
add TableMaster
PaddleOCR
15
Python
57
table_postprocess.py
def decode_label(self, batch): structure_idx = batch[1] gt_bbox_list = batch[2] shape_list = batch[-1] ignored_tokens = self.get_ignored_tokens() end_idx = self.dict[self.end_str] structure_batch_list = [] bbox_batch_list = [] batch_size = len(structure_idx) for batch_idx in range(batch_size): structure_list = [] bbox_list = [] for idx in range(len(structure_idx[batch_idx])): char_idx = int(structure_idx[batch_idx][idx]) if idx > 0 and char_idx == end_idx: break if char_idx in ignored_tokens: continue structure_list.append(self.character[char_idx]) bbox = gt_bbox_list[batch_idx][idx] if bbox.sum() != 0: bbox = self._bbox_decode(bbox, shape_list[batch_idx]) bbox_list.append(bbox) structure_batch_list.append(structure_list) bbox_batch_list.append(bbox_list) result = { 'bbox_batch_list': bbox_batch_list, 'structure_batch_list': structure_batch_list, } return result
a0c33908d500fe893d8e79e11399a5ab665f330b
190
https://github.com/PaddlePaddle/PaddleOCR.git
417
def decode_label(self, batch): structure_idx = batch[1] gt_bbox_list = batch[2] shape_list = batch[-1] ignored_tokens = self.get_ignored_tokens() end_idx = self.dict[self.end_str] structure_batch_list = [] bbox_batch_list = [] batch_size = len(structure_idx) for batch_idx in range(batch_size): structure_list = [] bbox_list = [] for idx in range(len(structure_idx[batch_idx])): char_idx = int(structure_idx[batch_idx][idx]) if idx > 0 and char_idx == end_idx: break if char_idx in ignored_tokens: continue structure_list.append(self.character[char_idx]) bbox = gt_bbox_list[batch_idx][idx] if bbox.sum() != 0: bbox = self._bbox_decode(bbox, shape_list[batch_idx]) bbox_list.append(bbox) structure_batch_list.append(structure_list) bbox_batch_list.append(bbox_list) result = { 'bbox_batch_list': bbox_batch_list, 'structure_batch_list': structure_batch_list, } return result
28
304
decode_label
15
0
1
6
src/sentry/search/events/datasets/metrics.py
97,067
feat(mep): Add the team-key-transaction alias (#32593) - This moves some of our aliases to their own files, the intention is to eventually migrate more of the class methods there this gives us a few benefits - We can unit test the resolverse now - They're easier to re-use across datasets
sentry
8
Python
14
metrics.py
def field_alias_converter(self) -> Mapping[str, Callable[[str], SelectType]]: return { constants.PROJECT_ALIAS: self._resolve_project_slug_alias, constants.PROJECT_NAME_ALIAS: self._resolve_project_slug_alias, constants.TEAM_KEY_TRANSACTION_ALIAS: self._resolve_team_key_transaction_alias, }
ebf2d415cbbbfc4473bcc304f0032f805969a0f9
46
https://github.com/getsentry/sentry.git
61
def field_alias_converter(self) -> Mapping[str, Callable[[str], SelectType]]: return { constants.PROJECT_ALIAS: self._resolve_project_slug_alias, constants.PROJECT_NAME_ALIAS: self._resolve_project_slug_alias, constants.TEAM_KEY_TRANSACTION_ALIAS: self._resolve_t
12
65
field_alias_converter
13
0
1
6
tests/snapshot_tests/test_snapshots.py
186,352
more pauses for demo?
textual
10
Python
12
test_snapshots.py
def test_demo(snap_compare): assert snap_compare( Path("../../src/textual/demo.py"), press=["down", "down", "down", "_", "_"], terminal_size=(100, 30), )
91e23ff34c2f34da291c7cadf26dcb5fbde4e439
37
https://github.com/Textualize/textual.git
43
def test_demo(snap_compare):
5
65
test_demo
660
0
51
191
deploy/pphuman/pipeline.py
210,930
ppvehicle plate license
PaddleDetection
20
Python
349
pipeline.py
def predict_video(self, video_file): # mot # mot -> attr # mot -> pose -> action capture = cv2.VideoCapture(video_file) video_out_name = 'output.mp4' if self.file_name is None else self.file_name # Get Video info : resolution, fps, frame count width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = int(capture.get(cv2.CAP_PROP_FPS)) frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) print("video fps: %d, frame_count: %d" % (fps, frame_count)) if not os.path.exists(self.output_dir): os.makedirs(self.output_dir) out_path = os.path.join(self.output_dir, video_out_name) fourcc = cv2.VideoWriter_fourcc(* 'mp4v') writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height)) frame_id = 0 entrance, records, center_traj = None, None, None if self.draw_center_traj: center_traj = [{}] id_set = set() interval_id_set = set() in_id_list = list() out_id_list = list() prev_center = dict() records = list() entrance = [0, height / 2., width, height / 2.] video_fps = fps video_action_imgs = [] if self.with_video_action: short_size = self.cfg["VIDEO_ACTION"]["short_size"] scale = ShortSizeScale(short_size) while (1): if frame_id % 10 == 0: print('frame id: ', frame_id) ret, frame = capture.read() if not ret: break if self.modebase["idbased"] or self.modebase["skeletonbased"]: if frame_id > self.warmup_frame: self.pipe_timer.total_time.start() self.pipe_timer.module_time['mot'].start() res = self.mot_predictor.predict_image( [copy.deepcopy(frame)], visual=False) if frame_id > self.warmup_frame: self.pipe_timer.module_time['mot'].end() # mot output format: id, class, score, xmin, ymin, xmax, ymax mot_res = parse_mot_res(res) # flow_statistic only support single class MOT boxes, scores, ids = res[0] # batch size = 1 in MOT mot_result = (frame_id + 1, boxes[0], scores[0], ids[0]) # single class statistic = flow_statistic( mot_result, self.secs_interval, self.do_entrance_counting, video_fps, entrance, id_set, interval_id_set, in_id_list, out_id_list, prev_center, records) records = statistic['records'] # nothing detected if len(mot_res['boxes']) == 0: frame_id += 1 if frame_id > self.warmup_frame: self.pipe_timer.img_num += 1 self.pipe_timer.total_time.end() if self.cfg['visual']: _, _, fps = self.pipe_timer.get_total_time() im = self.visualize_video(frame, mot_res, frame_id, fps, entrance, records, center_traj) # visualize writer.write(im) if self.file_name is None: # use camera_id cv2.imshow('PPHuman&&PPVehicle', im) if cv2.waitKey(1) & 0xFF == ord('q'): break continue self.pipeline_res.update(mot_res, 'mot') crop_input, new_bboxes, ori_bboxes = crop_image_with_mot( frame, mot_res) if self.with_vehicleplate: platelicense = self.vehicleplate_detector.get_platelicense( crop_input) self.pipeline_res.update(platelicense, 'vehicleplate') if self.with_attr: if frame_id > self.warmup_frame: self.pipe_timer.module_time['attr'].start() attr_res = self.attr_predictor.predict_image( crop_input, visual=False) if frame_id > self.warmup_frame: self.pipe_timer.module_time['attr'].end() self.pipeline_res.update(attr_res, 'attr') if self.with_idbased_detaction: if frame_id > self.warmup_frame: self.pipe_timer.module_time['det_action'].start() det_action_res = self.det_action_predictor.predict( crop_input, mot_res) if frame_id > self.warmup_frame: self.pipe_timer.module_time['det_action'].end() self.pipeline_res.update(det_action_res, 'det_action') if self.cfg['visual']: self.det_action_visual_helper.update(det_action_res) if self.with_idbased_clsaction: if frame_id > 
self.warmup_frame: self.pipe_timer.module_time['cls_action'].start() cls_action_res = self.cls_action_predictor.predict_with_mot( crop_input, mot_res) if frame_id > self.warmup_frame: self.pipe_timer.module_time['cls_action'].end() self.pipeline_res.update(cls_action_res, 'cls_action') if self.cfg['visual']: self.cls_action_visual_helper.update(cls_action_res) if self.with_skeleton_action: if frame_id > self.warmup_frame: self.pipe_timer.module_time['kpt'].start() kpt_pred = self.kpt_predictor.predict_image( crop_input, visual=False) keypoint_vector, score_vector = translate_to_ori_images( kpt_pred, np.array(new_bboxes)) kpt_res = {} kpt_res['keypoint'] = [ keypoint_vector.tolist(), score_vector.tolist() ] if len(keypoint_vector) > 0 else [[], []] kpt_res['bbox'] = ori_bboxes if frame_id > self.warmup_frame: self.pipe_timer.module_time['kpt'].end() self.pipeline_res.update(kpt_res, 'kpt') self.kpt_buff.update(kpt_res, mot_res) # collect kpt output state = self.kpt_buff.get_state( ) # whether frame num is enough or lost tracker skeleton_action_res = {} if state: if frame_id > self.warmup_frame: self.pipe_timer.module_time[ 'skeleton_action'].start() collected_keypoint = self.kpt_buff.get_collected_keypoint( ) # reoragnize kpt output with ID skeleton_action_input = parse_mot_keypoint( collected_keypoint, self.coord_size) skeleton_action_res = self.skeleton_action_predictor.predict_skeleton_with_mot( skeleton_action_input) if frame_id > self.warmup_frame: self.pipe_timer.module_time['skeleton_action'].end() self.pipeline_res.update(skeleton_action_res, 'skeleton_action') if self.cfg['visual']: self.skeleton_action_visual_helper.update( skeleton_action_res) if self.with_mtmct and frame_id % 10 == 0: crop_input, img_qualities, rects = self.reid_predictor.crop_image_with_mot( frame, mot_res) if frame_id > self.warmup_frame: self.pipe_timer.module_time['reid'].start() reid_res = self.reid_predictor.predict_batch(crop_input) if frame_id > self.warmup_frame: self.pipe_timer.module_time['reid'].end() reid_res_dict = { 'features': reid_res, "qualities": img_qualities, "rects": rects } self.pipeline_res.update(reid_res_dict, 'reid') else: self.pipeline_res.clear('reid') if self.with_video_action: # get the params frame_len = self.cfg["VIDEO_ACTION"]["frame_len"] sample_freq = self.cfg["VIDEO_ACTION"]["sample_freq"] if sample_freq * frame_len > frame_count: # video is too short sample_freq = int(frame_count / frame_len) # filter the warmup frames if frame_id > self.warmup_frame: self.pipe_timer.module_time['video_action'].start() # collect frames if frame_id % sample_freq == 0: # Scale image scaled_img = scale(frame) video_action_imgs.append(scaled_img) # the number of collected frames is enough to predict video action if len(video_action_imgs) == frame_len: classes, scores = self.video_action_predictor.predict( video_action_imgs) if frame_id > self.warmup_frame: self.pipe_timer.module_time['video_action'].end() video_action_res = {"class": classes[0], "score": scores[0]} self.pipeline_res.update(video_action_res, 'video_action') print("video_action_res:", video_action_res) video_action_imgs.clear() # next clip self.collector.append(frame_id, self.pipeline_res) if frame_id > self.warmup_frame: self.pipe_timer.img_num += 1 self.pipe_timer.total_time.end() frame_id += 1 if self.cfg['visual']: _, _, fps = self.pipe_timer.get_total_time() im = self.visualize_video(frame, self.pipeline_res, frame_id, fps, entrance, records, center_traj) # visualize writer.write(im) if self.file_name is None: # use camera_id 
cv2.imshow('PPHuman', im) if cv2.waitKey(1) & 0xFF == ord('q'): break writer.release() print('save result to {}'.format(out_path))
f2a883edd26c4630672c32415d9c5334846a4b5c
1,477
https://github.com/PaddlePaddle/PaddleDetection.git
4,259
def predict_video(self, video_file): # mot # mot -> attr # mot -> pose -> action capture = cv2.VideoCapture(video_file) video_out_name = 'output.mp4' if self.file_name is None else self.file_name # Get Video info : resolution, fps, frame count width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = int(capture.get(cv2.CAP_PROP_FPS)) frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) print("video fps: %d, frame_count: %d" % (fps, frame_count)) if not os.path.exists(self.output_dir): os.makedirs(self.output_dir) out_path = os.path.join(self.output_dir, video_out_name) fourcc = cv2.VideoWriter_fourcc(* 'mp4v') writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height)) frame_id = 0 entrance, records, center_traj = None, None, None if self.draw_center_traj: center_traj = [{}] id_set = set() interval_id_set = set() in_id_list = list() out_id_list = list() prev_center = dict() records = list() entrance = [0, height / 2., width, height / 2.] video_fps = fps video_action_imgs = [] if self.with_video_action: short_size = self.cfg["VIDEO_ACTION"]["short_size"] scale = ShortSizeScale(short_size) while (1): if frame_id % 10 == 0: print('frame id: ', frame_id) ret, frame = capture.read() if not ret: break if self.modebase["idbased"] or self.modebase["skeletonbased"]: if frame_id > self.warmup_frame: self.pipe_timer.total_time.start() self.pipe_timer.module_time['mot'].start() res = self.mot_predictor.predict_image( [copy.deepcopy(frame)], visual=False) if frame_id > self.warmup_frame: self.pipe_timer.module_time['mot'].end() # mot output format: id, class, score, xmin, ymin, xmax, ymax mot_res = parse_mot_res(res) # flow_statistic only support single class MOT boxes, scores, ids = res[0] # batch size = 1 in MOT mot_result = (frame_id + 1, boxes[0], scores[0], ids[0]) # single class statistic = flow_statistic( mot_result, self.secs_interval, self.do_entrance_counting, video_fps, entrance, id_set, interval_id_set, in_id_list, out_id_list, prev_center, records) records = statistic['records'] # nothing detected if len(mot_res['boxes']) == 0: frame_id += 1 if frame_id > self.warmup_frame: self.pipe_timer.img_num += 1 self.pipe_timer.total_time.end() if self.cfg['visual']: _, _, fps = self.pipe_timer.get_total_time() im = self.visualize_video(frame, mot_res, frame_id, fps, entrance, records, center_traj) # visualize writer.write(im) if self.file_name is None: # use camera_id cv2.imshow('PPHuman&&PPVehicle', im) if cv2.waitKey(1) & 0xFF == ord('q'): break continue self.pipeline_res.update(mot_res, 'mot') crop_input, new_bboxes, ori_bboxes = crop_image_with_mot( frame, mot_res) if self.with_vehicleplate: platelicense = self.vehicleplate_detector.get_platelicense( crop_input) self.pipeline_res.update(platelicense, 'vehicleplate') if self.with_attr: if frame_id > self.warmup_frame: self.pipe_timer.module_time['attr'].start() attr_res = self.attr_predictor.predict_image( crop_input, visual=False) if frame_id > self.warmup_frame: self.pipe_timer.module_time['attr'].end() self.pipeline_res.update(attr_res, 'attr') if self.with_idbased_detaction: if frame_id > self.warmup_frame: self.pipe_timer.module_time['det_action'].start() det_action_res = self.det_action_predictor.predict( crop_input, mot_res) if frame_id > self.warmup_frame: self.pipe_timer.module_time['det_action'].end() self.pipeline_res.update(det_action_res, 'det_action') if self.cfg['visual']: self.det_action_visual_helper.update(det_action_res) if self.with_idbased_clsaction: if frame_id > 
self.warmup_frame: self.pipe_timer.module_time['cls_action'].start() cls_action_res = self.cls_action_predictor.predict_with_mot( crop_input, mot_res) if frame_id > self.warmup_frame: self.pipe_timer.module_time['cls_action'].end() self.pipeline_res.update(cls_action_res, 'cls_action') if self.cfg['visual']: self.cls_action_visual_helper.update(cls_action_res) if self.with_skeleton_action: if frame_id > self.warmup_frame: self.pipe_timer.module_time['kpt'].start() kpt_pred = self.kpt_predictor.predict_image( crop_input, visual=False) keypoint_vector, score_vector = translate_to_ori_images( kpt_pred, np.array(new_bboxes)) kpt_res = {} kpt_res['keypoint'] = [ keypoint_vector.tolist(), score_vector.tolist() ] if len(keypoint_vector) > 0 else [[], []] kpt_res['bbox'] = ori_bboxes if frame_id > self.warmup_frame: self.pipe_timer.module_time['kpt'].end() self.pipeline_res.update(kpt_res, 'kpt') self.kpt_buff.update(kpt_res, mot_res) # collect kpt output state = self.kpt_buff.get_state( ) # whether frame num is enough or lost tracker skeleton_action_res = {} if state: if frame_id > self.warmup_frame: self.pipe_timer.module_time[ 'skeleton_action'].start() collected_keypoint = self.kpt_buff.get_collected_keypoint( ) # reoragnize kpt output with ID skeleton_action_input = parse_mot_keypoint( collected_keypoint, self.coord_size) skeleton_action_res = self.skeleton_action_predictor.predict_skeleton_with_mot( skeleton_action_input) if frame_id > self.warmup_frame: self.pipe_timer.module_time['skeleton_action'].end() self.pipeline_res.update(skeleton_action_res, 'skeleton_action') if self.cfg['visual']: self.skeleton_action_visual_helper.update( skeleton_action_res) if self.with_mtmct and frame_id % 10 == 0: crop_input, img_qualities, rects = self.reid_predictor.crop_image_with_mot( frame, mot_res) if frame_id > self.warmup_frame: self.pipe_timer.module_time['reid'].start() reid_res = self.reid_predictor.predict_batch(crop_input) if frame_id > self.warmup_frame: self.pipe_timer.module_time['reid'].end() reid_res_dict = { 'features': reid_res, "qualities": img_qualities, "rects": rects } self.pipeline_res.update(reid_res_dict, 'reid') else: self.pipeline_res.clear('reid') if self.with_video_action: # get the params frame_len = self.cfg["VIDEO_ACTION"]["frame_len"] sample_freq = self.cfg["VIDEO_ACTION"]["sample_freq"] if sample_freq * frame_len > frame_count: # video is too short sample_freq = int(frame_count / frame_len) # filter the warmup frames if frame_id > self.warmup_frame: self.pipe_timer.module_time['video_action'].start() # collect frames if frame_id % sample_freq == 0: # Scale image scaled_img = scale(frame) video_action_imgs.append(scaled_img) # the number of collected frames is enough to predict video action if len(video_action_imgs) == frame_len: classes, scores = self.video_action_predictor.predict( video_action_imgs) if frame_id > self.warmup_frame: self.pipe_timer.module_time['video_action'].end() video_action_res = {"class": classes[0], "score": scores[0]} self.pipeline_res.update(video_action_res, 'video_action') print("video_action_res:", video_action_res) video_action_imgs.clear() # next clip self.collector.append(frame_id, self.pipeline_res) if frame_id > self.warmup_frame: self.pipe_timer.img_num += 1 self.pipe_timer.total_time.end() frame_id += 1 if self.cfg['visual']: _, _, fps = self.pipe_timer.get_total_time() im = self.visualize_video(frame, self.pipeline_res, frame_id, fps, entrance, records, center_traj) # visualize writer.write(im) if self.file_name is None: # use camera_id
149
2,413
predict_video
130
0
9
41
mindsdb/interfaces/database/integrations.py
115,528
better dependencies installation
mindsdb
19
Python
86
integrations.py
def _get_handler_meta(self, module): handler_dir = Path(module.__path__[0]) handler_folder_name = handler_dir.name handler_name = handler_folder_name if handler_name.endswith('_handler'): handler_name = handler_name[:-8] dependencies = self._read_dependencies(handler_dir) self.handler_modules[module.name] = module import_error = None if hasattr(module, 'import_error'): import_error = module.import_error handler_meta = { 'import': { 'success': import_error is None, 'folder': handler_folder_name, 'dependencies': dependencies }, 'version': module.version } if import_error is not None: handler_meta['import']['error_message'] = str(import_error) for attr in ('connection_args_example', 'connection_args', 'description', 'name', 'type', 'title'): if hasattr(module, attr): handler_meta[attr] = getattr(module, attr) # region icon if hasattr(module, 'icon_path'): icon_path = handler_dir.joinpath(module.icon_path) handler_meta['icon'] = { 'name': icon_path.name, 'type': icon_path.name[icon_path.name.rfind('.') + 1:].lower() } if handler_meta['icon']['type'] == 'svg': with open(str(icon_path), 'rt') as f: handler_meta['icon']['data'] = f.read() else: with open(str(icon_path), 'rb') as f: handler_meta['icon']['data'] = base64.b64encode(f.read()).decode('utf-8') # endregion if handler_meta.get('name') in ('files', 'views', 'lightwood'): handler_meta['permanent'] = True else: handler_meta['permanent'] = False return handler_meta
f8e08f9509befc7b7ddfda5fccbd1b9b72c9b5f9
317
https://github.com/mindsdb/mindsdb.git
567
def _get_handler_meta(self, module): handler_dir = Path(module.__path__[0]) handler_folder_name = handler_dir.name handler_name = handler_folder_name if handler_name.endswith('_handler'): handler_name = handler_name[:-8] dependencies = s
31
558
_get_handler_meta
63
0
1
20
python/ray/tune/examples/mnist_pytorch.py
132,130
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
13
Python
48
mnist_pytorch.py
def get_data_loaders(): mnist_transforms = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ) # We add FileLock here because multiple workers will want to # download data, and this may cause overwrites since # DataLoader is not threadsafe. with FileLock(os.path.expanduser("~/data.lock")): train_loader = torch.utils.data.DataLoader( datasets.MNIST( "~/data", train=True, download=True, transform=mnist_transforms ), batch_size=64, shuffle=True, ) test_loader = torch.utils.data.DataLoader( datasets.MNIST( "~/data", train=False, download=True, transform=mnist_transforms ), batch_size=64, shuffle=True, ) return train_loader, test_loader
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
130
https://github.com/ray-project/ray.git
236
def get_data_loaders(): mnist_transforms = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ) # We add FileLock here because multiple workers will want to # download data, and this may cause overwrites since # DataLoader is not threadsafe. with FileLock(os.path.expanduser("~/data.lock")): train_loader = torch.utils.data.DataLoader( datasets.MNIST( "~/data", train=True, download=True, transform=mnist_transforms ), batch_size=64, shuffle=True,
23
195
get_data_loaders
17
0
2
5
packages/syft/src/syft/core/adp/vm_private_scalar_manager.py
280
Fix serialize
PySyft
12
Python
17
vm_private_scalar_manager.py
def get(self, index) -> int: while index > len(self.prime_numbers)-1: self.exp += 1 self.prime_numbers = primes(10**self.exp) return self.prime_numbers[index]
8436a4bbdd900476b4f85cad7024ef4e2e964352
45
https://github.com/OpenMined/PySyft.git
52
def get(self, index) -> int: while index > len(self.prime_numbers)-1: self.exp += 1 self.prime_numbers = primes(10**self.exp) return
8
70
get
27
0
1
9
modules/image/text_recognition/ch_pp-ocrv3_det/test.py
52,684
update ch_pp-ocrv3_det (#2173) * update ch_pp-ocrv3_det * update
PaddleHub
13
Python
22
test.py
def test_detect_text4(self): results = self.module.detect_text( images=[cv2.imread('tests/test.jpg')], use_gpu=False, visualization=True, ) self.assertEqual( results[0]['data'], [[[261, 202], [376, 202], [376, 239], [261, 239]], [[283, 162], [352, 162], [352, 202], [283, 202]]])
4382eee60dbee6cb153822a4cb839693e59091bf
99
https://github.com/PaddlePaddle/PaddleHub.git
102
def test_detect_text4(self): results = self.module.detect_text( images=[cv2.imread('tes
11
137
test_detect_text4
28
0
2
6
nni/compression/pytorch/utils/config_validation_v1.py
113,671
[Compression] remove pruning v1 & refactor directory (#5228)
nni
16
Python
27
config_validation_v1.py
def validate_op_types(model, op_types, logger): found_types = set(['default']) | set(map(lambda x: type(x[1]).__name__, model.named_modules())) not_found_op_types = list(set(op_types) - found_types) if not_found_op_types: logger.warning('op_types %s not found in model', not_found_op_types) return True
d68c786ff81bad19c04619d6a999ff34aaa724e7
66
https://github.com/microsoft/nni.git
46
def validate_op_types(model, op_types, logger): found_types = set(['default']) | set(map(lambda x: type(x[1]).__name__, model.named_modules())) not_found_op_types = list(set(op_types) - found_types) if not_found_op_types: logger.warning('op_types %s not found in model', not_found_op_types) return True
14
107
validate_op_types
54
0
1
21
test/test_inputs.py
179,901
inputs
gradio
11
Python
43
test_inputs.py
def test_as_component(self): bool_input = gr.inputs.Checkbox() self.assertEqual(bool_input.preprocess(True), True) self.assertEqual(bool_input.preprocess_example(True), True) self.assertEqual(bool_input.serialize(True, True), True) with tempfile.TemporaryDirectory() as tmpdirname: to_save = bool_input.save_flagged(tmpdirname, "bool_input", True, None) self.assertEqual(to_save, True) restored = bool_input.restore_flagged(tmpdirname, to_save, None) self.assertEqual(restored, True) self.assertIsInstance(bool_input.generate_sample(), bool) bool_input = gr.inputs.Checkbox(default=True, label="Check Your Input") self.assertEqual( bool_input.get_template_context(), { "default_value": True, "name": "checkbox", "label": "Check Your Input", "css": {}, }, )
04b6b80b3361a14eaee4a064bccc25494332e83c
165
https://github.com/gradio-app/gradio.git
253
def test_as_component(self): bool_input = gr.
23
267
test_as_component
26
0
1
14
tests/queries/test_qs_combinators.py
203,069
Fixed #29338 -- Allowed using combined queryset in Subquery. Thanks Eugene Kovalev for the initial patch, Simon Charette for the review, and Chetan Khanna for help.
django
20
Python
24
test_qs_combinators.py
def test_union_in_subquery(self): ReservedName.objects.bulk_create([ ReservedName(name='rn1', order=8), ReservedName(name='rn2', order=1), ReservedName(name='rn3', order=5), ]) qs1 = Number.objects.filter(num__gt=7, num=OuterRef('order')) qs2 = Number.objects.filter(num__lt=2, num=OuterRef('order')) self.assertCountEqual( ReservedName.objects.annotate( number=Subquery(qs1.union(qs2).values('num')), ).filter(number__isnull=False).values_list('order', flat=True), [8, 1], )
30a01441347d5a2146af2944b29778fa0834d4be
137
https://github.com/django/django.git
148
def test_union_in_subquery(self): ReservedName.objects.bulk_create([ ReservedName(name='rn1', order=8), ReservedName(name='rn2', order=1), ReservedName(name='rn3', or
24
219
test_union_in_subquery
10
0
1
4
homeassistant/components/motion_blinds/cover.py
294,228
Motion request update till stop (#68580) * update untill stop * fixes * fix spelling
core
8
Python
10
cover.py
def set_absolute_position(self, **kwargs): position = kwargs[ATTR_ABSOLUTE_POSITION] self._blind.Set_position(100 - position) self.request_position_till_stop()
83983bc875445d7147cb98e70f1214c6ed270da9
30
https://github.com/home-assistant/core.git
38
def set_absolute_position(self, **kwargs): position = kwargs
8
51
set_absolute_position
30
1
1
5
tests/unit/javascript/test_greasemonkey.py
320,967
greasemonkey: Don't implicitly load scripts Needed for #7245 and also seems like cleaner code.
qutebrowser
11
Python
25
test_greasemonkey.py
def test_all(gm_manager): _save_script(test_gm_script, 'test.user.js') gm_manager.load_scripts() assert (gm_manager.all_scripts()[0].name == "qutebrowser test userscript") @pytest.mark.parametrize("url, expected_matches", [ # included ('http://trolol.com/', 1), # neither included nor excluded ('http://aaaaaaaaaa.com/', 0), # excluded ('https://badhost.xxx/', 0), ])
21419c9ef5a90ea36a27afaf2503a57f8f9f8536
@pytest.mark.parametrize("url, expected_matches", [ # included ('http://trolol.com/', 1), # neither included nor excluded ('http://aaaaaaaaaa.com/', 0), # excluded ('https://badhost.xxx/', 0), ])
32
https://github.com/qutebrowser/qutebrowser.git
69
def test_all(gm_manager): _save_script(test_gm_script, 'test.user.js') gm_manager.load_scripts() assert (gm_manager.all_scripts()[0].name == "qutebrowser test userscript") @pytest.mark.parametrize("url, expected
10
109
test_all
51
0
7
18
erpnext/regional/india/e_invoice/utils.py
69,018
test: Add test for einvoice discounts
erpnext
15
Python
38
utils.py
def get_invoice_value_details(invoice): invoice_value_details = frappe._dict(dict()) invoice_value_details.base_total = abs(sum([i.taxable_value for i in invoice.get("items")])) if ( invoice.apply_discount_on == "Grand Total" and invoice.discount_amount and invoice.get("is_cash_or_non_trade_discount") ): invoice_value_details.invoice_discount_amt = invoice.discount_amount else: invoice_value_details.invoice_discount_amt = 0 invoice_value_details.round_off = invoice.base_rounding_adjustment invoice_value_details.base_grand_total = abs(invoice.base_rounded_total) or abs( invoice.base_grand_total ) invoice_value_details.grand_total = abs(invoice.rounded_total) or abs(invoice.grand_total) invoice_value_details = update_invoice_taxes(invoice, invoice_value_details) return invoice_value_details
38352b3e46fb18435c780e5775bbc886491eac96
124
https://github.com/frappe/erpnext.git
33
def get_invoice_value_details(invoice): invoice_value_details = frappe._dict(dict()) invoice_value_details.base_total = abs(sum([i.taxable_value for i in invoice.get("items")])) if ( invoice.apply_discount_on == "Grand Total" and invoice.discount_amount and invoice.get("is_cash_or_non_trade_discount") ): invoice_value_details.invoice_discount_amt = invoice.discount_amount else: invoice_value_details.invoice_discount_a
22
203
get_invoice_value_details
94
0
1
4
python/ccxt/async_support/currencycom.py
18,699
1.73.50 [ci skip]
ccxt
9
Python
45
currencycom.py
async def fetch_balance(self, params={}): await self.load_markets() response = await self.privateGetV2Account(params) # # { # "makerCommission": "0.20", # "takerCommission": "0.20", # "buyerCommission": "0.20", # "sellerCommission": "0.20", # "canTrade": True, # "canWithdraw": True, # "canDeposit": True, # "updateTime": "1645266330", # "userId": "644722", # "balances": [ # { # "accountId": "120702016179403605", # "collateralCurrency": False, # "asset": "CAKE", # "free": "1.784", # "locked": "0.0", # "default": False, # }, # { # "accountId": "109698017413175316", # "collateralCurrency": True, # "asset": "USD", # "free": "7.58632", # "locked": "0.0", # "default": True, # } # ] # } # return self.parse_balance(response)
72c00f3d959baa1e355e8d8231e60561abe62eea
32
https://github.com/ccxt/ccxt.git
667
async def fetch_balance(self, params={}): await self.load_markets() response = await self.privateGetV2Account(params) # # { # "makerCommission": "0.20", # "takerCommission": "0.20", # "buyerCommission": "0.20", # "sellerCommission": "0.20", # "canTrade": True, # "canWithdraw": True, # "canDeposit": True, # "updateTime": "1645266330", # "userId": "644722", # "balances": [ # { # "accountId": "120702016179403605", # "collateralCurrency": False, # "asset": "CAKE", # "free": "1.784", # "locked": "0.0", # "default": False, # }, # { # "accountId": "109698017413175316", # "collateralCurrency": True, # "asset": "USD", # "free": "7.58632", # "locked": "0.0", #
7
85
fetch_balance
99
0
1
26
modules/image/semantic_segmentation/ace2p/data_feed.py
51,476
update ace2p (#2003) * update ace2p * add clean func * update ace2p
PaddleHub
12
Python
67
data_feed.py
def preprocess(org_im, scale, rotation): image = org_im.copy() image_height, image_width, _ = image.shape aspect_ratio = scale[1] * 1.0 / scale[0] image_center, image_scale = _box2cs([0, 0, image_width - 1, image_height - 1], aspect_ratio) trans = get_affine_transform(image_center, image_scale, rotation, scale) image = cv2.warpAffine( image, trans, (int(scale[1]), int(scale[0])), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0)) img_mean = np.array([0.406, 0.456, 0.485]).reshape((1, 1, 3)) img_std = np.array([0.225, 0.224, 0.229]).reshape((1, 1, 3)) image = image.astype(np.float32) image = (image / 255.0 - img_mean) / img_std image = image.transpose(2, 0, 1).astype(np.float32) image_info = { 'image_center': image_center, 'image_height': image_height, 'image_width': image_width, 'image_scale': image_scale, 'rotation': rotation, 'scale': scale } return image, image_info
000473594a0d7c7d27795d017abe961902251869
259
https://github.com/PaddlePaddle/PaddleHub.git
217
def preprocess(org_im, scale, rotation): image = org_im.copy() image_height, image_width, _ = image.shape aspect_ratio = scale[1] * 1.0 / scale[0] image_center, image_scale = _box2cs([0, 0, image_width - 1, image_height - 1], aspect_ratio) trans = get_affine_transform(image_center, image_scale, rotation, scale) image = cv2.warpAffine( image, trans, (int(scale[1]), int(scale[0])), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0)) img_mean = np.array([0.406, 0.456, 0.485]).reshape((1, 1, 3)) img_std = np.array([0.225, 0.224, 0.229]).reshape((1, 1, 3)) image = image.astype(np.float32) image = (image / 255.0 - img_mean) / img_std image = image.transpose(2, 0, 1).astype(np.float32) image_info = { 'image_center': image_center, 'image_height': image_height, 'image_width': image_width, 'image_scale': image_scale, 'rotation': rotation, 'scale': scale } return image, image_info
33
356
preprocess
210
0
7
39
python/ray/tune/trial_runner.py
138,677
[tune] De-clutter log outputs in trial runner (#24257) There are currently some debug logs left logging to INFO scope. This PR demotes them to DEBUG and cleans up the messages.
ray
14
Python
138
trial_runner.py
def _process_trial_result(self, trial, result): result.update(trial_id=trial.trial_id) is_duplicate = RESULT_DUPLICATE in result force_checkpoint = result.get(SHOULD_CHECKPOINT, False) # TrialScheduler and SearchAlgorithm still receive a # notification because there may be special handling for # the `on_trial_complete` hook. if is_duplicate: logger.debug("Trial finished without logging 'done'.") result = trial.last_result result.update(done=True) self._total_time += result.get(TIME_THIS_ITER_S, 0) flat_result = flatten_dict(result) self._validate_result_metrics(flat_result) if self._stopper(trial.trial_id, result) or trial.should_stop(flat_result): decision = TrialScheduler.STOP else: with warn_if_slow("scheduler.on_trial_result"): decision = self._scheduler_alg.on_trial_result(self, trial, flat_result) if decision == TrialScheduler.STOP: result.update(done=True) else: # Only updating search alg if the trial is not to be stopped. with warn_if_slow("search_alg.on_trial_result"): self._search_alg.on_trial_result(trial.trial_id, flat_result) # If this is not a duplicate result, the callbacks should # be informed about the result. if not is_duplicate: with warn_if_slow("callbacks.on_trial_result"): self._callbacks.on_trial_result( iteration=self._iteration, trials=self._trials, trial=trial, result=result.copy(), ) trial.update_last_result(result) # Include in next experiment checkpoint self.trial_executor.mark_trial_to_checkpoint(trial) # Checkpoints to disk. This should be checked even if # the scheduler decision is STOP or PAUSE. Note that # PAUSE only checkpoints to memory and does not update # the global checkpoint state. self._checkpoint_trial_if_needed(trial, force=force_checkpoint) if trial.is_saving: logger.debug(f"Caching trial decision for trial {trial}: {decision}") # Cache decision to execute on after the save is processed. # This prevents changing the trial's state or kicking off # another training step prematurely. self._cached_trial_decisions[trial.trial_id] = decision return None else: self._queue_decision(trial, decision) return decision
4a30ae0ab65b6f4e966aa9bd9b50720889c8458e
262
https://github.com/ray-project/ray.git
733
def _process_trial_result(self, trial, result): result.update(trial_id=trial.trial_id) is_duplicate =
43
446
_process_trial_result
13
0
1
4
tests/test_engine.py
54,372
Fix tests
prefect
10
Python
12
test_engine.py
async def test_non_prefect_types_return_completed_state(self): result_state = await return_value_to_state("foo") assert result_state.is_completed() assert result_state.data.decode() == "foo"
b5836927c71ed0448b674a89efeba64133b586cc
28
https://github.com/PrefectHQ/prefect.git
33
async def test_non_prefect_types_return_completed_state(self): result_state = await return_value_to_state("foo") assert result_state.is_completed() assert result_state.data.decode() == "foo"
7
52
test_non_prefect_types_return_completed_state
23
0
1
5
homeassistant/components/sonos/speaker.py
305,722
Make Sonos typing more complete (#68072)
core
10
Python
21
speaker.py
def subscription_address(self) -> str: assert len(self._subscriptions) > 0 addr, port = self._subscriptions[0].event_listener.address return ":".join([addr, str(port)]) # # Subscription handling and event dispatchers #
73ba7a989b0cae6fba3564947d819e1eeb423f54
45
https://github.com/home-assistant/core.git
60
def subscription_address(self) -> str: assert len(self._subscriptions) > 0 addr, port = self._subscriptions[0].event_listener.address return ":".jo
10
77
subscription_address
57
0
1
11
tests/css/test_tokenize.py
182,106
Parsing variable values as individual tokens
textual
11
Python
32
test_tokenize.py
def test_variable_declaration_no_semicolon(): css = "$x: 1\n$y: 2" assert list(tokenize(css, "")) == [ Token(name="variable_name", value="$x:", code=css, path="", location=(0, 0)), Token(name="whitespace", value=" ", code=css, path="", location=(0, 3)), Token(name="number", value="1", code=css, path="", location=(0, 4)), Token(name="variable_value_end", value="\n", code=css, path="", location=(0, 5)), Token(name="variable_name", value="$y:", code=css, path="", location=(1, 0)), Token(name="whitespace", value=" ", code=css, path="", location=(1, 3)), Token(name="number", value="2", code=css, path="", location=(1, 4)), ]
6587ba257fc5ea07968752aeaf818b1002cdae0f
209
https://github.com/Textualize/textual.git
114
def test_variable_declaration_no_semicolon(): css = "$x: 1\n$y: 2" assert list(tokenize(css, "")) == [ Token(name="variable_name", value="$x:", code=css, path="", location=(0, 0)), Token(name="whitespace", value=" ", code=css, path="", location=(0, 3)), Token(name="number", value="1", code=css, path="", location=(0, 4)), Token(name="variable_value_end", value="\n", code=css, path="", location=(0, 5)), Token(name="variable_name", value="$y:", code=css, path="", location=(1, 0)), Token(name="whitespace", value=" ", code=css, path="", lo
10
326
test_variable_declaration_no_semicolon
83
0
8
31
lib/matplotlib/tests/test_ticker.py
108,632
Remove *math* parameter of various mathtext internal APIs. The *math* parameter is passed through many layers of the call stack but is ultimately only used for a single purpose: deciding whether to replace the ASCII hyphen by a (longer) unicode minus. Instead of doing that, just do the substitution at the parsing stage. In particular, this fixes problematic unicode minus support with the "cm" fontset. This patch also reverts a significant part of 52003e4, as LogFormatters no longer need to pass unicode minuses in mathtext -- everything gets converted by mathtext. Likewise, this change also invalidates the test_log_scales baseline image (old, buggy wrt. unicode minus); replace it by a test that the drawn ticks are as expected (which was the intent in 90c1aa3).
matplotlib
13
Python
52
test_ticker.py
def logit_deformatter(string): r match = re.match( r"[^\d]*" r"(?P<comp>1-)?" r"(?P<mant>\d*\.?\d*)?" r"(?:\\cdot)?" r"(?:10\^\{(?P<expo>-?\d*)})?" r"[^\d]*$", string, ) if match: comp = match["comp"] is not None mantissa = float(match["mant"]) if match["mant"] else 1 expo = int(match["expo"]) if match["expo"] is not None else 0 value = mantissa * 10 ** expo if match["mant"] or match["expo"] is not None: if comp: return 1 - value return value match = re.match( r"[^\d]*\\frac\{(?P<num>\d+)\}\{(?P<deno>\d+)\}[^\d]*$", string ) if match: num, deno = float(match["num"]), float(match["deno"]) return num / deno raise ValueError("Not formatted by LogitFormatter")
85f30cbd485eddc93e3c9ff115ac21c0886909d5
148
https://github.com/matplotlib/matplotlib.git
352
def logit_deformatter(string): r match = re.match( r"[^\d]*" r"(?P<comp>1-)?" r"(?P<mant>\d*\.?\d*)?" r"(?:\\
13
245
logit_deformatter
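The logit_deformatter record above recovers numeric tick values from LaTeX-style labels with two regular expressions. Below is a minimal standalone sketch of the same parsing idea, runnable without matplotlib; the helper name deformat_log_tick and the sample labels are illustrative, not part of the matplotlib test suite.

import re

def deformat_log_tick(label):
    # Recover the numeric value encoded in a LaTeX-style log/logit tick label,
    # e.g. "$\mathdefault{10^{-2}}$", "$\mathdefault{1-10^{-2}}$" or "$\frac{1}{2}$".
    match = re.match(
        r"[^\d]*"
        r"(?P<comp>1-)?"
        r"(?P<mant>\d*\.?\d*)?"
        r"(?:\\cdot)?"
        r"(?:10\^\{(?P<expo>-?\d*)})?"
        r"[^\d]*$",
        label,
    )
    if match and (match["mant"] or match["expo"] is not None):
        mantissa = float(match["mant"]) if match["mant"] else 1
        expo = int(match["expo"]) if match["expo"] is not None else 0
        value = mantissa * 10 ** expo
        return 1 - value if match["comp"] is not None else value
    match = re.match(r"[^\d]*\\frac\{(?P<num>\d+)\}\{(?P<deno>\d+)\}[^\d]*$", label)
    if match:
        return float(match["num"]) / float(match["deno"])
    raise ValueError(f"Unrecognised label: {label!r}")

print(deformat_log_tick(r"$\mathdefault{10^{-2}}$"))    # 0.01
print(deformat_log_tick(r"$\mathdefault{1-10^{-2}}$"))  # 0.99
print(deformat_log_tick(r"$\frac{1}{2}$"))              # 0.5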
218
1
6
55
keras/optimizers/__init__.py
279,360
Flip the default optimizer to experimental optimizer when deserializing optimizer. PiperOrigin-RevId: 465336057
keras
12
Python
129
__init__.py
def deserialize(config, custom_objects=None, **kwargs): # loss_scale_optimizer has a direct dependency of optimizer, import here # rather than top to avoid the cyclic dependency. from keras.mixed_precision import ( loss_scale_optimizer, ) use_legacy_optimizer = kwargs.pop("use_legacy_optimizer", True) if len(config["config"]) > 0: # If the optimizer config is not empty, then we use the value of # `is_legacy_optimizer` to override `use_legacy_optimizer`. If # `is_legacy_optimizer` does not exist in config, it means we are # using the legacy optimzier. use_legacy_optimizer = config["config"].get("is_legacy_optimizer", True) if ( tf.__internal__.tf2.enabled() and tf.executing_eagerly() and not use_legacy_optimizer ): all_classes = { "adadelta": adadelta_experimental.Adadelta, "adagrad": adagrad_experimental.Adagrad, "adam": adam_experimental.Adam, "adamax": adamax_experimental.Adamax, "experimentaladadelta": adadelta_experimental.Adadelta, "experimentaladagrad": adagrad_experimental.Adagrad, "experimentaladam": adam_experimental.Adam, "experimentalsgd": sgd_experimental.SGD, "nadam": nadam_experimental.Nadam, "rmsprop": rmsprop_experimental.RMSprop, "sgd": sgd_experimental.SGD, "ftrl": ftrl_experimental.Ftrl, "lossscaleoptimizer": loss_scale_optimizer.LossScaleOptimizerV3, "lossscaleoptimizerv3": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer "lossscaleoptimizerv1": loss_scale_optimizer.LossScaleOptimizer, } else: all_classes = { "adadelta": adadelta_v2.Adadelta, "adagrad": adagrad_v2.Adagrad, "adam": adam_v2.Adam, "adamax": adamax_v2.Adamax, "experimentaladadelta": adadelta_experimental.Adadelta, "experimentaladagrad": adagrad_experimental.Adagrad, "experimentaladam": adam_experimental.Adam, "experimentalsgd": sgd_experimental.SGD, "nadam": nadam_v2.Nadam, "rmsprop": rmsprop_v2.RMSprop, "sgd": gradient_descent_v2.SGD, "ftrl": ftrl.Ftrl, "lossscaleoptimizer": loss_scale_optimizer.LossScaleOptimizer, "lossscaleoptimizerv3": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer "lossscaleoptimizerv1": loss_scale_optimizer.LossScaleOptimizer, } # Make deserialization case-insensitive for built-in optimizers. if config["class_name"].lower() in all_classes: config["class_name"] = config["class_name"].lower() return deserialize_keras_object( config, module_objects=all_classes, custom_objects=custom_objects, printable_module_name="optimizer", ) @keras_export( "keras.__internal__.optimizers.convert_to_legacy_optimizer", v1=[] )
30bf872258415cd4a83ac1a33b031cc804981a9c
@keras_export( "keras.__internal__.optimizers.convert_to_legacy_optimizer", v1=[] )
311
https://github.com/keras-team/keras.git
761
def deserialize(config, custom_objects=None, **kwargs): # loss_scale_optimizer has a direct dependency of optimizer, import here # rather than top to avoid the cyclic dependency. from keras.mixed_precision import ( loss_scale_optimizer, ) use_legacy_optimizer = kwargs.pop("use_legacy_optimizer", True) if len(config["config"]) > 0: # If the optimizer config is not empty, then we use the value of #
49
547
deserialize
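The deserialize record above resolves an optimizer config against a class registry after lowercasing the class name, picking legacy or experimental classes as appropriate. Below is a minimal standalone sketch of just the case-insensitive lookup step; the FakeAdam class and REGISTRY dict are hypothetical stand-ins, and real Keras configs additionally carry flags such as is_legacy_optimizer, as the code above shows.

class FakeAdam:
    # Hypothetical stand-in for a real optimizer class; only the constructor
    # signature matters for the lookup being illustrated.
    def __init__(self, learning_rate=0.001):
        self.learning_rate = learning_rate

REGISTRY = {"adam": FakeAdam}

def deserialize_optimizer(config):
    # Case-insensitive resolution: "Adam", "adam" and "ADAM" hit the same entry.
    cls = REGISTRY[config["class_name"].lower()]
    return cls(**config.get("config", {}))

opt = deserialize_optimizer({"class_name": "Adam", "config": {"learning_rate": 0.01}})
print(type(opt).__name__, opt.learning_rate)  # FakeAdam 0.01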
15
0
1
8
tests/sentry/api/endpoints/test_organization_member_team_details.py
97,203
ref(tests): DRY OrganizationMember tests. (#32715)
sentry
12
Python
15
test_organization_member_team_details.py
def test_member_can_leave(self): self.login_as(self.team_member.user) self.get_success_response( self.org.slug, self.team_member.id, self.team.slug, status_code=status.HTTP_200_OK ) assert not OrganizationMemberTeam.objects.filter( team=self.team, organizationmember=self.team_member ).exists()
e676b34aea4b38ee33ee0dd2de5e0cc8e546ae1a
67
https://github.com/getsentry/sentry.git
71
def test_member_can_leave(self): self.login_as(self.team_member.user) self.get_success_response( self.org.slug, self.team_member.id, self.team.slug, status_code=status.HTTP_200_OK ) assert not OrganizationMemberTeam.objects.filter( team=self.team, organizationmember=self.team_member ).exists()
18
102
test_member_can_leave
63
0
1
14
tests/test_api_validate.py
187,112
plugin.api.validate: turn module into package Turn module into package with multiple logical sub-modules: - Define a public interface in the package's `__init__` module - Split validation schemas, validators and validate logic - schemas: classes which register attributes used by their respective `validate` implementations - validators: functions which can internally call `validate` and which return something that can be validated - validate: singledispatch functions which implement the validation logic for schemas and various other types - Rename validation schemas for better internal references - Rename singledispatch methods Other clean-up work: - Update comments and fix grammar - Add type annotations - Use f-strings - Use `str` instead of the `text` alias - Simplify some code blocks - Rearrange classes and functions - Rephrase certain error messages - Add a few more tests for better code coverage
streamlink
15
Python
39
test_api_validate.py
def test_url(self): url_ = "https://google.se/path" assert validate(url(), url_) assert validate(url(scheme="http"), url_) assert validate(url(path="/path"), url_) with self.assertRaises(ValueError) as cm: validate(url(), "foo") assert str(cm.exception) == "'foo' is not a valid URL" with self.assertRaises(ValueError) as cm: validate(url(foo="bar"), "https://foo") assert str(cm.exception) == "Invalid URL attribute 'foo'" with self.assertRaises(ValueError) as cm: validate(url(path=endswith(".m3u8")), "https://foo/bar.mpd") assert str(cm.exception) == "Unable to validate URL attribute 'path': '/bar.mpd' does not end with '.m3u8'"
120c10302381600abb4044083ce0a106b31df8f0
131
https://github.com/streamlink/streamlink.git
165
def test_url(self): url_ = "https://google.se/path" assert validate(url(), url_) assert validate(url(scheme="http"), url
14
237
test_url
11
0
1
4
tests/test_filesystems.py
58,992
Added Sync API for public interface methods. (#6511) * Added sync api * Removed trailing Spaces * filesystem tests for sync methods * sync tests * added more tests * adding test to resolve conflict * resolving git conflict * removed redundant/unnecessary tests * removed redundant/unnecessary tests Co-authored-by: Michael Adkins <michael@prefect.io> Co-authored-by: Bada-S <jamessopkin@gmail.com> Co-authored-by: James Bada Sopkin <69161193+Bada-S@users.noreply.github.com>
prefect
10
Python
11
test_filesystems.py
def test_read_write_roundtrip_sync(self): fs = RemoteFileSystem(basepath="memory://root") fs.write_path("test.txt", content=b"hello") assert fs.read_path("test.txt") == b"hello"
6e0a171ae169f4db1cfdd5ad9e0a576ff4962386
34
https://github.com/PrefectHQ/prefect.git
31
def test_read_write_roundtrip_sync(self): fs = RemoteFileSystem(basepath="memory://root") fs.write_path("test.txt", content=b"hello")
8
61
test_read_write_roundtrip_sync
22
0
5
10
mitmproxy/tls.py
251,165
`pyupgrade --py39-plus **/*.py`
mitmproxy
16
Python
18
tls.py
def alpn_protocols(self) -> list[bytes]: if self._client_hello.extensions: for extension in self._client_hello.extensions.extensions: if extension.type == 0x10: return list(x.name for x in extension.body.alpn_protocols) return []
e83ec8390ad6be6a86cfcfc57bce14cb8861bf32
54
https://github.com/mitmproxy/mitmproxy.git
88
def alpn_protocols(self) -> list[bytes]: if self._client_hello.extensions: for extension in self._client_hello.extensions.extensions: if extension.type == 0x10: return list(x.name for x in extension.body.alpn_protocols) return []
11
85
alpn_protocols
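The alpn_protocols record above scans the TLS ClientHello extensions for type 0x10 (ALPN) and collects the offered protocol names. Below is a small self-contained sketch using SimpleNamespace stand-ins for the parsed structures; the stand-ins are illustrative only, not mitmproxy's real parser types.

from types import SimpleNamespace

# Minimal stand-in for a parsed ClientHello: one server_name extension (type 0x00)
# and one ALPN extension (type 0x10) offering h2 and http/1.1.
client_hello = SimpleNamespace(
    extensions=SimpleNamespace(
        extensions=[
            SimpleNamespace(type=0x00, body=None),
            SimpleNamespace(
                type=0x10,
                body=SimpleNamespace(
                    alpn_protocols=[
                        SimpleNamespace(name=b"h2"),
                        SimpleNamespace(name=b"http/1.1"),
                    ]
                ),
            ),
        ]
    )
)

def alpn_protocols(client_hello):
    if client_hello.extensions:
        for extension in client_hello.extensions.extensions:
            if extension.type == 0x10:
                return [x.name for x in extension.body.alpn_protocols]
    return []

print(alpn_protocols(client_hello))  # [b'h2', b'http/1.1']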
152
0
9
61
cps/kobo.py
172,481
Fixes for kobosync with multiple users (#2230)
calibre-web
25
Python
82
kobo.py
def sync_shelves(sync_token, sync_results, only_kobo_shelves=False): new_tags_last_modified = sync_token.tags_last_modified # transmit all archived shelfs independent of last sync (why should this matter?) for shelf in ub.session.query(ub.ShelfArchive).filter(ub.ShelfArchive.user_id == current_user.id): new_tags_last_modified = max(shelf.last_modified, new_tags_last_modified) sync_results.append({ "DeletedTag": { "Tag": { "Id": shelf.uuid, "LastModified": convert_to_kobo_timestamp_string(shelf.last_modified) } } }) ub.session.delete(shelf) ub.session_commit() extra_filters = [] if only_kobo_shelves: for shelf in ub.session.query(ub.Shelf).filter( func.datetime(ub.Shelf.last_modified) > sync_token.tags_last_modified, ub.Shelf.user_id == current_user.id, not ub.Shelf.kobo_sync ): sync_results.append({ "DeletedTag": { "Tag": { "Id": shelf.uuid, "LastModified": convert_to_kobo_timestamp_string(shelf.last_modified) } } }) extra_filters.append(ub.Shelf.kobo_sync) if sqlalchemy_version2: shelflist = ub.session.execute(select(ub.Shelf).outerjoin(ub.BookShelf).filter( or_(func.datetime(ub.Shelf.last_modified) > sync_token.tags_last_modified, func.datetime(ub.BookShelf.date_added) > sync_token.tags_last_modified), ub.Shelf.user_id == current_user.id, *extra_filters ).distinct().order_by(func.datetime(ub.Shelf.last_modified).asc())).columns(ub.Shelf) else: shelflist = ub.session.query(ub.Shelf).outerjoin(ub.BookShelf).filter( or_(func.datetime(ub.Shelf.last_modified) > sync_token.tags_last_modified, func.datetime(ub.BookShelf.date_added) > sync_token.tags_last_modified), ub.Shelf.user_id == current_user.id, *extra_filters ).distinct().order_by(func.datetime(ub.Shelf.last_modified).asc()) for shelf in shelflist: if not shelf_lib.check_shelf_view_permissions(shelf): continue new_tags_last_modified = max(shelf.last_modified, new_tags_last_modified) tag = create_kobo_tag(shelf) if not tag: continue if shelf.created > sync_token.tags_last_modified: sync_results.append({ "NewTag": tag }) else: sync_results.append({ "ChangedTag": tag }) sync_token.tags_last_modified = new_tags_last_modified ub.session_commit() # Creates a Kobo "Tag" object from a ub.Shelf object
df670795731e7568462869b815f4eb39da1bb41a
462
https://github.com/janeczku/calibre-web.git
757
def sync_shelves(sync_token, sync_results, only_kobo_shelves=False): new_tags_last_modified = sync_token.tags_last_modified # transmit all archived shelfs independent of last sync (why should this matter?) for shelf in ub.session.query(ub.ShelfArchive).filter(ub.ShelfArchive.user_id == current_user.id): new_tags_last_modified = max(shelf.last_modified, new_tags_last_modified) sync_results.append({ "DeletedTag": { "Tag": { "Id": shelf.uuid, "LastModified": convert_to_kobo_timestamp_string(shelf.last_modified) } } }) ub.session.delete(shelf) ub.session_commit() extra_filters = [] if only_kobo_shelves: for shelf in ub.session.query(ub.Shelf).filter( func.datetime(ub.Shelf.last_modified) > sync_token.tags_last_modified, ub.Shelf.user_id == current_user.id, not ub.Shelf.kobo_sync ): sync_results.append({ "DeletedTag": { "Tag": { "Id": shelf.uuid, "LastModified": convert_to_kobo_timestamp_string(shelf.last_modified) } } }) extra_filters.append(ub.Shelf.kobo_sync) if sqlalchemy_version2: shelflist = ub.sessio
44
741
sync_shelves
113
1
4
24
python/ray/data/tests/test_dataset_formats.py
144,773
Split test_dataset.py into two (#22303)
ray
14
Python
77
test_dataset_formats.py
def test_json_roundtrip(ray_start_regular_shared, fs, data_path): # Single block. df = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]}) ds = ray.data.from_pandas([df]) ds._set_uuid("data") ds.write_json(data_path, filesystem=fs) file_path = os.path.join(data_path, "data_000000.json") ds2 = ray.data.read_json([file_path], filesystem=fs) ds2df = ds2.to_pandas() assert ds2df.equals(df) # Test metadata ops. for block, meta in ds2._blocks.get_blocks_with_metadata(): BlockAccessor.for_block(ray.get(block)).size_bytes() == meta.size_bytes if fs is None: os.remove(file_path) else: fs.delete_file(_unwrap_protocol(file_path)) # Two blocks. df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]}) ds = ray.data.from_pandas([df, df2]) ds._set_uuid("data") ds.write_json(data_path, filesystem=fs) ds2 = ray.data.read_json(data_path, parallelism=2, filesystem=fs) ds2df = ds2.to_pandas() assert pd.concat([df, df2], ignore_index=True).equals(ds2df) # Test metadata ops. for block, meta in ds2._blocks.get_blocks_with_metadata(): BlockAccessor.for_block(ray.get(block)).size_bytes() == meta.size_bytes @pytest.mark.parametrize( "fs,data_path,endpoint_url", [ (None, lazy_fixture("local_path"), None), (lazy_fixture("local_fs"), lazy_fixture("local_path"), None), (lazy_fixture("s3_fs"), lazy_fixture("s3_path"), lazy_fixture("s3_server")), ], )
85d6946c9524d8544e69262f737018151efb1567
@pytest.mark.parametrize( "fs,data_path,endpoint_url", [ (None, lazy_fixture("local_path"), None), (lazy_fixture("local_fs"), lazy_fixture("local_path"), None), (lazy_fixture("s3_fs"), lazy_fixture("s3_path"), lazy_fixture("s3_server")), ], )
296
https://github.com/ray-project/ray.git
237
def test_json_roundtrip(ray_start_regular_shared, fs, data_path): # Single block. df = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]}) ds = ray.data.from_pandas([df]) ds._set_uuid("data") ds.write_json(data_path, filesystem=fs) file_path = os.path.join(data_path, "data_000000.json") ds2 = ray.data.read_json([file_path], filesystem=fs) ds2df = ds2.to_pandas() assert ds2df.equals(df) # Test metadata ops. for block, meta in ds2._blocks.get_blocks_with_metadata(): BlockAccessor.for_block(ray.get(block)).size_bytes() == meta.size_bytes if fs is None: os.remove(file_path) else: fs.delete_file(_unwrap_protocol(file_path)) # Two blocks. df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]}) ds = ray.data.from_pandas([df, df2]) ds._set_uuid("data") ds.write_json(data_path, filesystem=fs) ds2 = ray.data.read_json(data_path, parallelism=2, filesystem=fs) ds2df = ds2.to_pandas() assert pd.concat([df, df2], ignore_index=True).equals(ds2df) # Test metadata ops. for b
42
571
test_json_roundtrip
6
0
1
2
homeassistant/components/usgs_earthquakes_feed/geo_location.py
314,959
Migrate usgs_earthquakes_feed to async library (#68370) * use new async integration library * migrate to new async integration library * updated unit tests * updated logger * fix tests and improve test coverage * fix test * fix requirements * time control to fix tests
core
9
Python
6
geo_location.py
async def _remove_entity(self, external_id): async_dispatcher_send(self._hass, SIGNAL_DELETE_ENTITY.format(external_id))
21d28dd35629a7f4fc086bf9ff4f65ee9270873b
21
https://github.com/home-assistant/core.git
20
async def _remove_entity(self, external_id): async_dispatcher_send(self._hass, SIGNAL_DELETE_ENTITY.format(ext
7
36
_remove_entity
16
0
1
11
metrics/mahalanobis/mahalanobis.py
104,370
Add Mahalanobis distance metric (#3794) * Add Mahalanobis class metric * reformat code * try to fix tests * reformat file with black * fix metric example * reformat with black * running isort * fix flake8 * change assert to ValueError * change metric's features * Update metrics/mahalanobis/mahalanobis.py Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> * PR feedback * Update metrics/mahalanobis/mahalanobis.py Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
datasets
18
Python
16
mahalanobis.py
def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"), } ), )
52b3ba8a3173adb0a4d6411f2b4978551672e450
55
https://github.com/huggingface/datasets.git
133
def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Fe
15
88
_info
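The _info record above only declares the metric's input features. For context, here is a hedged numpy sketch of the quantity a Mahalanobis metric computes, the distance of each sample from a reference distribution under its inverse covariance; the variable names, shapes and data are illustrative, not the metric's actual implementation.

import numpy as np

# Synthetic reference set and samples; shapes follow the "X" sequence feature
# declared above (each row is one observation).
rng = np.random.default_rng(0)
reference = rng.normal(size=(100, 3))        # reference distribution
X = rng.normal(loc=0.5, size=(5, 3))         # samples to score

mu = reference.mean(axis=0)
cov_inv = np.linalg.inv(np.cov(reference, rowvar=False))
delta = X - mu
# d_i = sqrt((x_i - mu)^T Sigma^{-1} (x_i - mu))
dist = np.sqrt(np.einsum("ij,jk,ik->i", delta, cov_inv, delta))
print(dist)  # one Mahalanobis distance per row of X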
17
0
1
9
tests/unit/recommenders/evaluation/test_python_evaluation_time_performance.py
39,432
Improved time of generating synthetic data, updated pytest fixtures, removed unused lines, updated benchmark results
recommenders
10
Python
17
test_python_evaluation_time_performance.py
def test_python_map_at_k(rating_true, rating_pred): with Timer() as t: map_at_k( rating_true=rating_true, rating_pred=rating_pred, col_prediction=DEFAULT_PREDICTION_COL, k=10, ) assert t.interval < 29.90376154
9d7d8212292b05605bb2b7c5c425c107d4266e8c
41
https://github.com/microsoft/recommenders.git
80
def test_python_map_at_k(rating_true, rating_pred): with Timer() as t: map_at_k(
10
61
test_python_map_at_k
97
0
1
12
demo/kinematics_blocks/run.py
179,813
kinematics block
gradio
12
Python
70
run.py
def plot(v, a): g = 9.81 theta = a/180*3.14 tmax = ((2 * v) * np.sin(theta)) / g timemat = tmax*np.linspace(0,1,40)[:,None] x = ((v * timemat) * np.cos(theta)) y = ((v * timemat) * np.sin(theta)) - ((0.5 * g) * (timemat ** 2)) fig = plt.figure() plt.scatter(x=x, y=y, marker='.') plt.xlim(0, 100) plt.ylim(0, 60) return fig block = gr.Blocks() with block: gr.Markdown("Let's do some kinematics! Choose the speed and angle to see the trajectory.") with gr.Row(): speed = gr.Slider(25, min=1, max=30,label="Speed") angle = gr.Slider(45, min=0, max=90, label="Angle") output = gr.Image(type="plot") btn = gr.Button("Run") btn.click(plot, [speed, angle], output) block.launch()
ef533b23321de5a37f81f9d5b5c60f8ea8e620a7
147
https://github.com/gradio-app/gradio.git
163
def plot(v, a): g = 9.81 theta = a/180*3.14 tmax = ((2 * v) * np.sin(theta)) / g timemat = tmax*np.linspace(0,1,40)[:,None] x = ((v * timemat) * np.cos(theta)) y = ((v * timemat) * np.sin(theta))
38
383
plot
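The plot record above builds its trajectory arrays from standard projectile formulas. Below is a quick closed-form check of those formulas for v = 25 m/s at 45 degrees, the values the demo's sliders appear to start at; the exact numbers are only a sanity check, and np.radians is used here where plot() approximates pi as 3.14.

import numpy as np

g = 9.81
v, angle = 25.0, 45.0
theta = np.radians(angle)
t_flight = 2 * v * np.sin(theta) / g           # total time of flight
x_range = v * np.cos(theta) * t_flight         # horizontal range at landing
y_apex = (v * np.sin(theta)) ** 2 / (2 * g)    # maximum height
print(round(t_flight, 2), round(x_range, 1), round(y_apex, 1))  # 3.6 63.7 15.9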
19
0
2
6
homeassistant/components/twinkly/light.py
291,598
Add Twinkly effects (#82861) * Add Twinkly effects * Remove spurious comment
core
10
Python
18
light.py
async def async_update_movies(self) -> None: movies = await self._client.get_saved_movies() _LOGGER.debug("Movies: %s", movies) if "movies" in movies: self._movies = movies["movies"]
33cd59d3c2e1f945c16b39d929349e3eeb4cfb9a
39
https://github.com/home-assistant/core.git
58
async def async_update_movies(self) -> None: movies = await self._client.get_saved_movies() _LOGGER.debug("Movies: %s", movies) if "movie
8
72
async_update_movies
99
0
6
21
pipenv/patched/notpip/_internal/cli/req_command.py
19,845
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
pipenv
9
Python
77
req_command.py
def warn_if_run_as_root() -> None: if running_under_virtualenv(): return if not hasattr(os, "getuid"): return # On Windows, there are no "system managed" Python packages. Installing as # Administrator via pip is the correct way of updating system environments. # # We choose sys.platform over utils.compat.WINDOWS here to enable Mypy platform # checks: https://mypy.readthedocs.io/en/stable/common_issues.html if sys.platform == "win32" or sys.platform == "cygwin": return if os.getuid() != 0: return logger.warning( "Running pip as the 'root' user can result in broken permissions and " "conflicting behaviour with the system package manager. " "It is recommended to use a virtual environment instead: " "https://pip.pypa.io/warnings/venv" )
f3166e673fe8d40277b804d35d77dcdb760fc3b3
56
https://github.com/pypa/pipenv.git
191
def warn_if_run_as_root() -> None: if running_under_virtualenv(): return if not hasattr(os, "getuid"): return # On Windows, there are no "system managed" Python packages. Installing as # Administrator via pip is the correct way of updating system environments. # # We choose sys.platform over utils.compat.WINDOWS here to enable Mypy platform # checks: https://mypy.readthedocs.io/en/stable/common_issues.html if sys.platform == "win32" or sys.platform == "cygwin": return
9
114
warn_if_run_as_root
11
0
1
3
terminal.py
283,333
Updating some names (#1575) * quick econ fix * black * keys and feature flags * terminal name :eyes: * some more replacements * some more replacements * edit pyproject * gst -> openbb * add example portfolios back to git * Update api from gst * sorry. skipping some tests * another round of names * another round of test edits * Missed some .gst refs and update timezone * water mark stuff * Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS * fix more GST to OpenBB Terminal * Logging : merge conflicts with main * Revert wrong files Co-authored-by: Andrew <andrew.kenreich@gmail.com> Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
OpenBBTerminal
9
Python
11
terminal.py
def call_etf(self, _): from openbb_terminal.etf.etf_controller import ETFController self.queue = self.load_class(ETFController, self.queue)
b71abcfbf4d7e8ac1855522aff0378e13c8b5362
30
https://github.com/OpenBB-finance/OpenBBTerminal.git
32
def call_etf(self, _): from openbb_terminal.etf.etf_controller import ETFController self.queue = self.load
9
47
call_etf
28
0
5
10
scripts/dev/update_3rdparty.py
320,709
Use legacy PDF.js build for macOS/Windows releases Fixes #7108
qutebrowser
13
Python
25
update_3rdparty.py
def find_pdfjs_asset(assets, legacy): for asset in assets: name = asset["name"] if ( name.startswith("pdfjs-") and name.endswith("-dist.zip") and name.endswith("-legacy-dist.zip") == legacy ): return asset raise Exception(f"No pdfjs found in {assets}")
f6a365172afe127a4ba770e14569f2d3cd7569b4
53
https://github.com/qutebrowser/qutebrowser.git
102
def find_pdfjs_asset(assets, legacy): for asset in assets: name = asset["name"] if ( name.startswith("pdfjs-") and name.endswith("-dist.zip") and name.endswith("-legacy-dist.zip") == legacy ): return a
8
97
find_pdfjs_asset
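The find_pdfjs_asset record above selects either the modern or the legacy PDF.js build from a GitHub release's asset list. A hedged usage sketch with made-up asset names follows; the version number 3.4.120 is illustrative, not a real release reference.

assets = [
    {"name": "pdfjs-3.4.120-dist.zip"},          # modern build
    {"name": "pdfjs-3.4.120-legacy-dist.zip"},   # legacy build for older runtimes
    {"name": "source.tar.gz"},
]

def find_pdfjs_asset(assets, legacy):
    for asset in assets:
        name = asset["name"]
        if (
            name.startswith("pdfjs-")
            and name.endswith("-dist.zip")
            and name.endswith("-legacy-dist.zip") == legacy
        ):
            return asset
    raise Exception(f"No pdfjs found in {assets}")

print(find_pdfjs_asset(assets, legacy=False)["name"])  # pdfjs-3.4.120-dist.zip
print(find_pdfjs_asset(assets, legacy=True)["name"])   # pdfjs-3.4.120-legacy-dist.zip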
27
0
1
11
tests/admin_views/tests.py
207,648
Refs #33476 -- Reformatted code with Black.
django
11
Python
24
tests.py
def test_extended_bodyclass_template_delete_selected_confirmation(self): group = Group.objects.create(name="foogroup") post_data = { "action": "delete_selected", "selected_across": "0", "index": "0", "_selected_action": group.id, } response = self.client.post(reverse("admin:auth_group_changelist"), post_data) self.assertEqual(response.context["site_header"], "Django administration") self.assertContains(response, "bodyclass_consistency_check ")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
76
https://github.com/django/django.git
120
def test_extended_bodyclass_template_delete_selected_confirmation(self): group = Group.objects.create(name="foogroup") post_data = { "action": "delete_selected",
16
140
test_extended_bodyclass_template_delete_selected_confirmation
173
0
2
23
tests/test_arrow_dataset.py
105,172
Optimize contiguous shard and select (#4466) * optimize contiguous shard and select * minor * support iterators (and therefore generators) * comments + docstrings
datasets
15
Python
110
test_arrow_dataset.py
def test_concatenate_with_indices_from_disk(self, in_memory): data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7]} info1 = DatasetInfo(description="Dataset1") info2 = DatasetInfo(description="Dataset2") with tempfile.TemporaryDirectory() as tmp_dir: dset1, dset2, dset3 = ( Dataset.from_dict(data1, info=info1), Dataset.from_dict(data2, info=info2), Dataset.from_dict(data3), ) dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) dset1, dset2, dset3 = ( dset1.select([2, 1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow")), dset2.select([2, 1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow")), dset3.select([1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i3.arrow")), ) with concatenate_datasets([dset3, dset2, dset1]) as dset_concat: self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2)) self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3)) self.assertListEqual(dset_concat["id"], [7, 6, 5, 4, 3, 2, 1, 0]) # in_memory = False: # 3 cache files for the dset_concat._data table, and 1 for the dset_concat._indices_table # There is only 1 for the indices tables (i1.arrow) # Indeed, the others are brought to memory since an offset is applied to them. # in_memory = True: # 1 cache file for i1.arrow since both dset_concat._data and dset_concat._indices are in memory self.assertEqual(len(dset_concat.cache_files), 1 if in_memory else 3 + 1) self.assertEqual(dset_concat.info.description, "Dataset2\n\nDataset1") del dset1, dset2, dset3
599403601739e7a73e8ebbc8653d246e07207265
347
https://github.com/huggingface/datasets.git
532
def test_concatenate_with_indices_from_disk(self, in_memory): data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7]} info1 = DatasetInfo(description="Dataset1") info2 = DatasetInfo(description="Dataset2") with tempfile.TemporaryDirectory() as tmp_dir: dset1, dset2, dset3 = ( Dataset.from_dict(data1, info=info1), Dataset.from_dict(data2, info=info2), Dataset.from_dict(data3), ) dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) dset1, dset2, dset3 = ( dset1.select([2,
31
523
test_concatenate_with_indices_from_disk
41
0
2
3
freqtrade/rpc/api_server/ws/channel.py
151,748
log warning if channel too far behind, add docstrings to message stream
freqtrade
14
Python
37
channel.py
def _calc_send_limit(self): # Only update if we have enough data if len(self._send_times) == self._send_times.maxlen: # At least 1s or twice the average of send times, with a # maximum of 3 seconds per message self._send_high_limit = min(max(self.avg_send_time * 2, 1), 3)
afc00bc30a94abd64fee000535e66287fd91595f
39
https://github.com/freqtrade/freqtrade.git
95
def _calc_send_limit(self): # Only update if we have enough data if len(self._send_times) == self._send_times.maxlen: # At least 1s or twice the average of send times, with a # maximum of 3 seconds per message self._send_hig
9
66
_calc_send_limit
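The _calc_send_limit record above clamps the per-message send limit to min(max(2 * avg, 1), 3) seconds once the send-time buffer is full. A few worked values of that clamp, with hypothetical averages:

for avg_send_time in (0.2, 0.8, 2.5):
    limit = min(max(avg_send_time * 2, 1), 3)
    print(f"avg {avg_send_time}s -> limit {limit}s")
# avg 0.2s -> limit 1s    (1 second floor)
# avg 0.8s -> limit 1.6s  (twice the average)
# avg 2.5s -> limit 3s    (3 second ceiling)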
146
0
5
39
gamestonk_terminal/portfolio/portfolio_analysis/pa_controller.py
281,511
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: james <jmaslek11@gmail.com> Co-authored-by: jose-donato <zmcdonato@gmail.com>
OpenBBTerminal
14
Python
106
pa_controller.py
def call_group(self, other_args): parser = argparse.ArgumentParser( prog="group", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Displays portfolio grouped by a given column", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-g") parser.add_argument( "-g", "--group", type=str, dest="group", default="Ticker", choices=self.portfolio.columns, help="Column to group by", ) parser.add_argument( "-a", "--allocation", action="store_true", default=False, help="Add allocation column in % to dataframe", dest="allocation", ) # The following arguments will be used in a later PR for customizable 'reports' # The --func flag will need to be tested that it exists for pandas groupby # parser.add_argument("-f", # "--func", # type=str, # dest="function", # help="Aggregate function to apply to groups" # ) # parser.add_argument("-d", # "--display", # default = None, # help = "Columns to display", # dest="cols") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if "value" in self.portfolio.columns: portfolio_view.display_group_holdings( portfolio=self.portfolio, group_column=ns_parser.group, allocation=ns_parser.allocation, ) else: console.print( "'value' column not in portfolio. " "Either add manually or load without --no_last_price flag\n" )
82747072c511beb1b2672846ae2ee4aec53eb562
165
https://github.com/OpenBB-finance/OpenBBTerminal.git
863
def call_group(self, other_args): parser = argparse.ArgumentParser( prog="group", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter,
31
287
call_group
8
0
1
3
pipenv/patched/notpip/_vendor/pyparsing/testing.py
20,654
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
pipenv
10
Python
8
testing.py
def assertRaisesParseException(self, exc_type=ParseException, msg=None): with self.assertRaises(exc_type, msg=msg): yield
f3166e673fe8d40277b804d35d77dcdb760fc3b3
26
https://github.com/pypa/pipenv.git
33
def assertRaisesParseException(self, exc_type=ParseException, msg=None): with self.assertRaises(exc_type, msg=msg): yield
6
42
assertRaisesParseException
61
1
3
18
django/contrib/humanize/templatetags/humanize.py
204,154
Refs #33476 -- Reformatted code with Black.
django
10
Python
49
humanize.py
def apnumber(value): try: value = int(value) except (TypeError, ValueError): return value if not 0 < value < 10: return value return ( _("one"), _("two"), _("three"), _("four"), _("five"), _("six"), _("seven"), _("eight"), _("nine"), )[value - 1] # Perform the comparison in the default time zone when USE_TZ = True # (unless a specific time zone has been applied with the |timezone filter). @register.filter(expects_localtime=True)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
@register.filter(expects_localtime=True)
86
https://github.com/django/django.git
160
def apnumber(value): try: value = int(value) except (TypeError, ValueError): return value if not 0 < value < 10: return value return ( _("one"), _("two"), _("three"), _("four"), _("five"), _("six"), _("seven"), _("eight"), _("nine"), )[value - 1] # Perform the comparison in the default time zone
9
165
apnumber
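The apnumber record above spells out the numbers one through nine in AP style and passes everything else through unchanged. A standalone sketch of the same rule without Django's translation machinery; the WORDS tuple stands in for the gettext calls.

WORDS = ("one", "two", "three", "four", "five", "six", "seven", "eight", "nine")

def apnumber(value):
    # AP style: spell out one through nine, leave everything else untouched.
    try:
        value = int(value)
    except (TypeError, ValueError):
        return value
    if not 0 < value < 10:
        return value
    return WORDS[value - 1]

print(apnumber(5), apnumber("7"), apnumber(12), apnumber("n/a"))  # five seven 12 n/a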
18
0
1
7
src/textual/_terminal_modes.py
183,576
[terminal buffering] Address PR feedback
textual
8
Python
17
_terminal_modes.py
def mode_is_supported(self) -> bool: return self.report_parameter in MODE_REPORTS_PARAMETERS_INDICATING_SUPPORT MODE_REPORTS_PARAMETERS_INDICATING_SUPPORT = frozenset( { ModeReportParameter.Set, ModeReportParameter.Reset, ModeReportParameter.PermanentlySet, ModeReportParameter.PermanentlyReset, } )
7f27e70440c177b2a047b7f74a78ed5cd5b4b596
14
https://github.com/Textualize/textual.git
64
def mode_is_supported(self) -> bool: return self.report_parameter in MODE_REPORTS_PARAMETERS_INDICATING_SUPPORT MODE_REPOR
11
57
mode_is_supported
25
0
1
4
python3.10.4/Lib/datetime.py
222,381
add python 3.10.4 for windows
XX-Net
9
Python
22
datetime.py
def _days_before_year(year): "year -> number of days before January 1st of year." y = year - 1 return y*365 + y//4 - y//100 + y//400
8198943edd73a363c266633e1aa5b2a9e9c9f526
18
https://github.com/XX-net/XX-Net.git
33
def _days_before_year(year): "year -> number of days before January 1st of year." y = year - 1 return y*365 + y//4 - y//100 + y//400
3
45
_days_before_year
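The _days_before_year record above counts the days before January 1st of a year with the usual Gregorian leap-year correction. A short cross-check of that closed form against the stdlib's own proleptic Gregorian ordinals, where date(1, 1, 1).toordinal() == 1:

from datetime import date

def _days_before_year(year):
    # year -> number of days before January 1st of that year.
    y = year - 1
    return y * 365 + y // 4 - y // 100 + y // 400

for year in (1, 2000, 2024):
    # The ordinal of Jan 1st is exactly one more than the days before the year.
    assert _days_before_year(year) == date(year, 1, 1).toordinal() - 1
print(_days_before_year(2024))  # 738885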
15
0
1
6
tests/components/matter/test_init.py
297,669
Fix matter websocket reconnect (#84192)
core
11
Python
15
test_init.py
def listen_ready_timeout_fixture() -> Generator[int, None, None]: with patch( "homeassistant.components.matter.LISTEN_READY_TIMEOUT", new=0 ) as timeout: yield timeout
6a8d9a91cb3fd5a55f13de54ea5db23125e72632
28
https://github.com/home-assistant/core.git
38
def listen_ready_timeout_fixture() -> Generator[int, None, None]: with patch( "homeassistant.components.matter.LISTEN_READY_TIMEOUT", new=0 ) as timeout: yield timeout
6
50
listen_ready_timeout_fixture
23
0
1
9
tests/sentry/models/test_groupmeta.py
91,424
ref: replace self.assertRaises with pytest.raises (#35685) * add flake8 plugin to detect assertRaises * ref: replace self.assertRaises with pytest.raises * non-sed fixes
sentry
11
Python
16
test_groupmeta.py
def test_get_value_bulk(self): with pytest.raises(GroupMeta.CacheNotPopulated): GroupMeta.objects.get_value_bulk([self.group], "foo") GroupMeta.objects.create(group=self.group, key="foo", value="bar") with pytest.raises(GroupMeta.CacheNotPopulated): GroupMeta.objects.get_value_bulk([self.group], "foo") GroupMeta.objects.populate_cache([self.group]) result = GroupMeta.objects.get_value_bulk([self.group], "foo") assert result == {self.group: "bar"}
284e980df0018f8baee659999268bdd4c7d08255
111
https://github.com/getsentry/sentry.git
86
def test_get_value_bulk(self): with pytest.raises(GroupMeta.CacheNotPopulated): GroupMeta.objects.get_value_bulk([self.group], "foo") GroupMeta.objects.create(group=self.group, key="foo", value="bar") with pytest.raises(GroupMeta.CacheNotPopulated): GroupMet
14
186
test_get_value_bulk
20
0
3
5
tests/conftest.py
282,470
Updated uninstall brotli wording (#1333)
OpenBBTerminal
12
Python
19
conftest.py
def brotli_check(): installed_packages = pkg_resources.working_set for item in list(installed_packages): if "brotli" in str(item).lower(): pytest.exit("Uninstall brotli and brotlipy before running tests")
f8fc7d00ffe6b22b2e2a951fb887f1312644d32f
35
https://github.com/OpenBB-finance/OpenBBTerminal.git
43
def brotli_check(): installed_packages = pkg_resources.working_set for item in list(installed_packages): if "brotli" in str(item).lower(): pytest.exit("Uninstall brotli and brotlipy before running tests")
10
62
brotli_check
27
0
3
5
TTS/tts/utils/text/phonemizers/espeak_wrapper.py
262,648
Handle espeak 1.48.15 (#2203)
TTS
11
Python
23
espeak_wrapper.py
def backend(self, backend): if backend not in ["espeak", "espeak-ng"]: raise Exception("Unknown backend: %s" % backend) self._ESPEAK_LIB = backend self._ESPEAK_VER = get_espeakng_version() if backend == "espeak-ng" else get_espeak_version()
fdeefcc6126dfe1382696d9105992295883be0a7
44
https://github.com/coqui-ai/TTS.git
58
def backend(self, backend): if backend not in ["espeak", "espeak-ng"]: raise Exception("Unknown backend: %s" % backend) self._ESPEAK_LIB = backend
7
76
backend
166
0
14
47
erpnext/selling/report/customer_acquisition_and_loyalty/customer_acquisition_and_loyalty.py
68,315
fix: bulk fix (~330) missing translations
erpnext
16
Python
113
customer_acquisition_and_loyalty.py
def get_data_by_territory(filters, common_columns): columns = [ { "label": _("Territory"), "fieldname": "territory", "fieldtype": "Link", "options": "Territory", "width": 150, } ] columns += common_columns customers_in = get_customer_stats(filters, tree_view=True) territory_dict = {} for t in frappe.db.sql( , as_dict=1 ): territory_dict.update({t.name: {"parent": t.parent_territory, "is_group": t.is_group}}) depth_map = frappe._dict() for name, info in territory_dict.items(): default = depth_map.get(info["parent"]) + 1 if info["parent"] else 0 depth_map.setdefault(name, default) data = [] for name, indent in depth_map.items(): condition = customers_in.get(name) new = customers_in[name]["new"] if condition else [0, 0.0] repeat = customers_in[name]["repeat"] if condition else [0, 0.0] temp = { "territory": name, "parent_territory": territory_dict[name]["parent"], "indent": indent, "new_customers": new[0], "repeat_customers": repeat[0], "total": new[0] + repeat[0], "new_customer_revenue": new[1], "repeat_customer_revenue": repeat[1], "total_revenue": new[1] + repeat[1], "bold": 0 if indent else 1, } data.append(temp) loop_data = sorted(data, key=lambda k: k["indent"], reverse=True) for ld in loop_data: if ld["parent_territory"]: parent_data = [x for x in data if x["territory"] == ld["parent_territory"]][0] for key in parent_data.keys(): if key not in ["indent", "territory", "parent_territory", "bold"]: parent_data[key] += ld[key] return columns, data, None, None, None, 1
a896895a9e76a68ab055ce7871bb9d181d3fac15
385
https://github.com/frappe/erpnext.git
119
def get_data_by_territory(filters, common_columns): columns = [ { "label": _("Territory"), "fieldname": "territory", "fieldtype": "Link", "options": "Territory", "width": 150, } ] columns += common_columns customers_in = get_customer_stats(filters, tree_view=True) territory_dict = {} for t in frappe.db.sql( , as_dict=1 ): territory_dict.update({t.name: {"parent": t.parent_territory, "is_group": t.is_group}}) depth_map = frappe._dict() for name, info in te
41
613
get_data_by_territory
28
1
2
9
tests/unit/test_helper.py
10,786
fix: random port assignment (#4139) * fix: assign only unique ports * fix: check for none ports * fix: use port as int * fix: change debug out * fix: add more debug out * fix: protect partiald port finding * fix: track partiald ports * fix: move partial ports up * fix: lock as cls var * fix: more debug stuff * fix: more log output * fix: remove get * fix: try again on docker fail * Revert "fix: try again on docker fail" This reverts commit c2947ee5c824fb8133319c26be4eb3de36ae7925. * fix: add more debug * fix: try connect with socket * style: fix overload and cli autocomplete * fix: set min port env in ci * fix: set min port in jinad * fix: port helper test * fix: keep track of port in * fix: clean up * fix: remove connect check * fix: remove psutil * style: fix overload and cli autocomplete * fix: seperate jinad port range * fix: use asyncio to run jinad pea * fix: kill jinad process with fire * fix: remove codecov * fix: docker compose tests * Revert "fix: remove codecov" This reverts commit 31d0d41e882699656f5b109ff7d747bf74b47971. * fix: upgrade codecov action * fix: clean up * fix: remove codecov * fix: readd code cov * fix: increase timeout for k8s test * fix: wrong cov tag * Revert "fix: wrong cov tag" This reverts commit 00ce072dd1eb5a84b19c0d3f3eafb5ebf8c1ae53. * Revert "fix: increase timeout for k8s test" This reverts commit 9b0e3134489fc90953beed0c2ff1393e0abcf26d. * fix: reset ci file * fix: readd port config * fix: use run_async helper again * fix: dont touch import Co-authored-by: Jina Dev Bot <dev-bot@jina.ai>
jina
12
Python
20
test_helper.py
def test_random_port_unique(config): reset_ports() assert os.environ['JINA_RANDOM_PORT_MIN'] generated_ports = set() for i in range(1000): port = random_port() assert port not in generated_ports assert int(os.environ['JINA_RANDOM_PORT_MIN']) <= port <= 65535 generated_ports.add(port) @pytest.fixture
f4f8f314481dcdec3bc8d322012a1942303d768f
@pytest.fixture
58
https://github.com/jina-ai/jina.git
66
def test_random_port_unique(config): reset_ports() assert os.environ['JINA_RANDOM_PORT_MIN'] generated_ports = set() for i in range(1000): port = random_port() assert port not in generated_ports assert int(os.environ['JINA_RANDOM_PORT_MIN']) <= port <= 65535 generated_ports.add(port) @pyte
15
103
test_random_port_unique
6
0
1
3
modin/test/exchange/dataframe_protocol/pandas/test_protocol.py
154,130
FIX-#4652: Support categorical data in `from_dataframe` (#4737) Signed-off-by: Karthik Velayutham <vkarthik@ponder.io>
modin
10
Python
6
test_protocol.py
def test_simple_import(): modin_df = pd.DataFrame(test_data["int_data"]) eval_df_protocol(modin_df)
8521bbe63f15fbfc6c86a9d5a3c99112738ce7fd
19
https://github.com/modin-project/modin.git
11
def test_simple_import(): modin_df = pd.DataFrame(test_data["int_data"]) eval_df_protocol(modin_df
6
34
test_simple_import
95
0
1
25
Tests/test_image_point.py
242,931
Support more affine expression forms in Image.point In modes I and F, Image.point only supported affine expressions of the forms (lambda x:) x * a, x + a, and x * a + b. Expressions like 1 - x had to be written x * -1 + 1. This rewrite, though still limited to affine transformations, supports far more expression forms, including 1 - x, (2 * x + 1) / 3, etc.
Pillow
13
Python
30
test_image_point.py
def test_sanity(): im = hopper() with pytest.raises(ValueError): im.point(list(range(256))) im.point(list(range(256)) * 3) im.point(lambda x: x) im.point(lambda x: x * 1.2) im = im.convert("I") with pytest.raises(ValueError): im.point(list(range(256))) im.point(lambda x: x * 1) im.point(lambda x: x + 1) im.point(lambda x: x * 1 + 1) im.point(lambda x: 0.1 + 0.2 * x) im.point(lambda x: -x) im.point(lambda x: x - 0.5) im.point(lambda x: 1 - x / 2) im.point(lambda x: (2 + x) / 3) im.point(lambda x: 0.5) with pytest.raises(TypeError): im.point(lambda x: x * x) with pytest.raises(TypeError): im.point(lambda x: 1 / x) with pytest.raises(TypeError): im.point(lambda x: x // 2)
4e12ccc63e40a9b567af3b2e1ac821f5157cddc6
262
https://github.com/python-pillow/Pillow.git
186
def test_sanity(): im = hopper() with pytest.raises(ValueError): im.point(list(range(256))) im.point(list(range(256)) * 3) im.point(lambda x: x) im.point(lambda x: x * 1.2) im = im.convert("I") with pytest.raises(ValueE
12
430
test_sanity
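The test_sanity record above exercises the wider set of affine expressions that Image.point now accepts in modes I and F. One way such expressions can be reduced to a (scale, offset) pair is by probing the callable at a few points; the as_scale_offset helper below is a hypothetical sketch of that idea, not Pillow's actual implementation.

import math

def as_scale_offset(expr):
    # Probe the callable at 0 and 1 to recover the affine coefficients,
    # then spot-check a third point to reject non-affine expressions.
    offset = expr(0)
    scale = expr(1) - offset
    if not math.isclose(expr(2), scale * 2 + offset):
        raise TypeError("expression is not affine")
    return scale, offset

print(as_scale_offset(lambda x: 1 - x / 2))    # (-0.5, 1.0)
print(as_scale_offset(lambda x: (2 + x) / 3))  # approx (0.333, 0.667)
try:
    as_scale_offset(lambda x: x * x)
except TypeError as exc:
    print(exc)                                 # expression is not affine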
85
0
3
22
tests/test_deployments.py
54,773
Update tests
prefect
10
Python
48
test_deployments.py
async def test_multiple_specs_from_yaml(self): specs = deployment_specs_from_yaml(TEST_FILES_DIR / "multiple-deployments.yaml") assert len(specs) == 2 specs_by_name = {spec.name: spec for spec in specs} assert set(specs_by_name.keys()) == { "hello-sun-deployment", "hello-moon-deployment", } sun_deploy = specs_by_name["hello-sun-deployment"] moon_deploy = specs_by_name["hello-moon-deployment"] assert sun_deploy.flow_location == str(TEST_FILES_DIR / "multiple_flows.py") assert sun_deploy.flow_name == "hello-sun" assert moon_deploy.flow_location == str(TEST_FILES_DIR / "multiple_flows.py") assert moon_deploy.flow_name == "hello-moon" sun_src = specs[sun_deploy] moon_src = specs[moon_deploy] assert sun_src["file"] == str(TEST_FILES_DIR / "multiple-deployments.yaml") assert moon_src["file"] == str(TEST_FILES_DIR / "multiple-deployments.yaml") assert sun_src["line"] == 1 assert moon_src["line"] == 5 for spec in specs: await spec.validate()
262f05bece5560d0e8cfc36daa6403a67239f825
156
https://github.com/PrefectHQ/prefect.git
243
async def test_multiple_specs_from_yaml(self): specs = deployment_specs_from_yaml(TEST_FILES_DIR / "multiple-deployments.yaml")
19
269
test_multiple_specs_from_yaml
87
0
2
18
networkx/algorithms/community/tests/test_louvain.py
176,256
Add weights to karate club graph (#5285) Add weights to the karate_club_graph. Modifies `non_randomness` and `naive_greedy_modularity_communities` to accept a `weight` parameter and modifies tests that use the kcg accordingly Co-authored-by: Kevin Berry <kevin.berry@worthix.com> Co-authored-by: Dan Schult <dschult@colgate.edu>
networkx
12
Python
65
test_louvain.py
def test_none_weight_param(): G = nx.karate_club_graph() nx.set_edge_attributes( G, {edge: i * i for i, edge in enumerate(G.edges)}, name="foo" ) part = [ {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21}, {16, 4, 5, 6, 10}, {23, 25, 27, 28, 24, 31}, {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30}, ] partition1 = louvain_communities(G, weight=None, seed=2) partition2 = louvain_communities(G, weight="foo", seed=2) partition3 = louvain_communities(G, weight="weight", seed=2) assert part == partition1 assert part != partition2 assert part != partition3 assert partition2 != partition3
290ebce534b84f9db20ec58b98cbb170e65a0ba1
178
https://github.com/networkx/networkx.git
157
def test_none_weight_param(): G = nx.karate_club_graph() nx.set_edge_attributes( G, {edge: i * i for i, edge in enumerate(G.edges)}, name="foo" ) part = [ {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21}, {16, 4, 5, 6, 10}, {23, 25, 27, 28, 24, 31}, {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30}, ] partition1 = louvain_communities(G, weight=None, seed=2) partition2 = louvain_communities(G, weight="foo", seed=2) partition3 = louvain_communities(G, weight="weight", seed=2
17
239
test_none_weight_param
38
1
4
9
pipenv/utils/shell.py
19,802
Issue 4993 Add standard pre commit hooks and apply linting. (#4994) * Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.
pipenv
13
Python
32
shell.py
def find_requirements(max_depth=3): i = 0 for c, _, _ in walk_up(os.getcwd()): i += 1 if i < max_depth: r = os.path.join(c, "requirements.txt") if os.path.isfile(r): return r raise RuntimeError("No requirements.txt found!") # Borrowed from Pew. # See https://github.com/berdario/pew/blob/master/pew/_utils.py#L82 @contextmanager
9a3b3ce70621af6f9adaa9eeac9cf83fa149319c
@contextmanager
64
https://github.com/pypa/pipenv.git
98
def find_requirements(max_depth=3): i = 0 for c, _, _ in walk_u
14
113
find_requirements
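The find_requirements record above walks up from the current directory looking for a requirements.txt, bounded by max_depth. Below is a standalone sketch of the same idea using pathlib instead of pipenv's walk_up helper; it is not a drop-in replacement, and the depth accounting differs slightly from the original's counter.

import os
from pathlib import Path

def find_requirements(start=None, max_depth=3):
    # Inspect the starting directory and up to max_depth - 1 of its parents.
    here = Path(start or os.getcwd()).resolve()
    for depth, directory in enumerate([here, *here.parents]):
        if depth >= max_depth:
            break
        candidate = directory / "requirements.txt"
        if candidate.is_file():
            return str(candidate)
    raise RuntimeError("No requirements.txt found!")

try:
    print(find_requirements())
except RuntimeError as exc:
    print(exc)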
24
0
1
13
tests/db_functions/math/test_sin.py
202,760
Refs #33476 -- Reformatted code with Black.
django
14
Python
22
test_sin.py
def test_integer(self): IntegerModel.objects.create(small=-20, normal=15, big=-1) obj = IntegerModel.objects.annotate( small_sin=Sin("small"), normal_sin=Sin("normal"), big_sin=Sin("big"), ).first() self.assertIsInstance(obj.small_sin, float) self.assertIsInstance(obj.normal_sin, float) self.assertIsInstance(obj.big_sin, float) self.assertAlmostEqual(obj.small_sin, math.sin(obj.small)) self.assertAlmostEqual(obj.normal_sin, math.sin(obj.normal)) self.assertAlmostEqual(obj.big_sin, math.sin(obj.big))
9c19aff7c7561e3a82978a272ecdaad40dda5c00
140
https://github.com/django/django.git
119
def test_integer(self): IntegerModel.objects.create(small=-20, normal=15, big=-1) obj = IntegerModel.objects.annotate( small_sin=Sin("small"), normal_sin=Sin("normal"), big_sin=Sin("big"), ).first() self.assertIsInstance(obj.small_sin, float) self.assertIsInstance(obj.normal_sin, float) self.assertIsInstance(obj.big_sin, float) self.assertAlmostEqual(obj.small_sin, math.sin(obj.sma
20
220
test_integer
34
0
1
12
saleor/core/tests/test_dataloaders.py
29,740
Add plugin manager promise (#11414)
saleor
10
Python
27
test_dataloaders.py
def test_plugins_manager_loader_loads_requestor_in_plugin(rf, customer_user, settings): settings.PLUGINS = ["saleor.plugins.tests.sample_plugins.ActivePlugin"] request = rf.request() request.user = customer_user request.app = None handler = BaseHandler() handler.load_middleware() handler.get_response(request) manager = get_plugin_manager_promise(request).get() plugin = manager.all_plugins.pop() assert isinstance(plugin.requestor, type(customer_user)) assert plugin.requestor.id == customer_user.id
decd505f55d02c616ce5b804c06a71e120d15c15
90
https://github.com/saleor/saleor.git
66
def test_plugins_manager_loader_loads_requestor_in_plugin(rf, customer_user, settings): settings.PLUGINS = ["saleor.plugins.tests.sample_plugins.ActivePlugin"] request = rf.request() request.user = customer_user request.app = None handler = BaseHandler() handler.load_middleware() handler.get_response(request) manager = get_plugin_manager_pro
22
147
test_plugins_manager_loader_loads_requestor_in_plugin
115
0
15
34
cps/comic.py
172,633
Bugfix for cbr support without comicapi
calibre-web
19
Python
51
comic.py
def _extract_Cover_from_archive(original_file_extension, tmp_file_name, rarExecutable): cover_data = extension = None if original_file_extension.upper() == '.CBZ': cf = zipfile.ZipFile(tmp_file_name) for name in cf.namelist(): ext = os.path.splitext(name) if len(ext) > 1: extension = ext[1].lower() if extension in COVER_EXTENSIONS: cover_data = cf.read(name) break elif original_file_extension.upper() == '.CBT': cf = tarfile.TarFile(tmp_file_name) for name in cf.getnames(): ext = os.path.splitext(name) if len(ext) > 1: extension = ext[1].lower() if extension in COVER_EXTENSIONS: cover_data = cf.extractfile(name).read() break elif original_file_extension.upper() == '.CBR' and use_rarfile: try: rarfile.UNRAR_TOOL = rarExecutable cf = rarfile.RarFile(tmp_file_name) for name in cf.namelist(): ext = os.path.splitext(name) if len(ext) > 1: extension = ext[1].lower() if extension in COVER_EXTENSIONS: cover_data = cf.read(name) break except Exception as ex: log.debug('Rarfile failed with error: %s', ex) return cover_data, extension
8007e450b3178f517b83b0989744c6df38867932
248
https://github.com/janeczku/calibre-web.git
509
def _extract_Cover_from_archive(original_file_extension, tmp_file_name, rarExecutable): cover_data = extension = None if original_file_extension.upper() == '.CBZ': cf = zipfile.ZipFile(tmp_file_name) for name in cf.namelist(): ext = os.path.splitext(name) if len(ext) > 1: extension = ext[1].lower() if extension in COVER_EXTENSIONS: cover_data =
32
408
_extract_Cover_from_archive
26
0
5
6
modin/config/__main__.py
154,081
REFACTOR-#4629: Add type annotations to `modin/config` (#4685) Signed-off-by: Karthik Velayutham <vkarthik@ponder.io>
modin
15
Python
25
__main__.py
def print_config_help() -> None: for objname in sorted(globals()): obj = globals()[objname] if isinstance(obj, type) and issubclass(obj, Parameter) and not obj.is_abstract: print(f"{obj.get_help()}\n\tCurrent value: {obj.get()}") # noqa: T201
02363589aa5105e091fa3d790b29cddf94cc8118
50
https://github.com/modin-project/modin.git
58
def print_config_help() -> None: for objname in sorted(globals()): obj = globals()[objname] if isinstance(obj, type) and issubcla
13
108
print_config_help
20
0
2
8
gamestonk_terminal/cryptocurrency/due_diligence/dd_controller.py
280,875
reset different coin from dd controller on crypto (#1118)
OpenBBTerminal
12
Python
14
dd_controller.py
def call_reset(self, _): self.queue.insert(0, "dd") if self.current_coin: self.queue.insert(0, f"load {self.current_coin} --source {self.source}") self.queue.insert(0, "crypto") self.queue.insert(0, "reset") self.queue.insert(0, "quit") self.queue.insert(0, "quit")
041a6cd5a06af3809419d043c2410d5317799d1a
74
https://github.com/OpenBB-finance/OpenBBTerminal.git
80
def call_reset(self, _):
7
139
call_reset
19
0
2
5
sklearn/metrics/tests/test_pairwise_distances_reduction.py
260,677
FEA Add support for float32 on `PairwiseDistancesReduction` using Tempita (#23865) Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org>
scikit-learn
15
Python
18
test_pairwise_distances_reduction.py
def relative_rounding(scalar, n_significant_digits): if scalar == 0: return 0.0 magnitude = int(floor(log10(abs(scalar)))) + 1 return round(scalar, n_significant_digits - magnitude)
b7d01716216042dda9663f1732d8419e62858a1e
43
https://github.com/scikit-learn/scikit-learn.git
38
def relative_rounding(scalar, n_significant_digits): if scalar == 0: return 0.0 magnitude = int(floor(log10(abs(scalar)))) + 1 return round(scalar, n_signifi
9
67
relative_rounding
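The relative_rounding record above rounds to a number of significant digits rather than decimal places, which is what comparing float32 and float64 distance results requires. A few worked values, with arbitrary inputs:

from math import floor, log10

def relative_rounding(scalar, n_significant_digits):
    if scalar == 0:
        return 0.0
    magnitude = int(floor(log10(abs(scalar)))) + 1
    return round(scalar, n_significant_digits - magnitude)

print(relative_rounding(123.456, 2))    # 120.0    (magnitude 3 -> round(x, -1))
print(relative_rounding(0.0012345, 3))  # 0.00123  (magnitude -2 -> round(x, 5))
print(relative_rounding(0, 4))          # 0.0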
70
0
7
23
kittens/ask/main.py
103,676
ask kitten: allow having hidden text in the message
kitty
16
Python
48
main.py
def draw_screen(self) -> None: self.cmd.clear_screen() msg_lines: List[str] = [] if self.message: for line in self.message.splitlines(): msg_lines.extend(self.draw_long_text(line)) y = self.screen_size.rows - len(msg_lines) y = max(0, (y // 2) - 2) self.print(end='\r\n'*y) for line in msg_lines: if self.replacement_text in line: idx = line.find(self.replacement_text) x = wcswidth(line[:idx]) self.replacement_range = Range(x, x + wcswidth(self.replacement_text), y) self.print(line) y += 1 if self.screen_size.rows > 2: self.print() y += 1 if self.cli_opts.type == 'yesno': self.draw_yesno(y) else: self.draw_choice(y)
bfcd3249930a46c3ba5c53e48e182136809fb6e8
181
https://github.com/kovidgoyal/kitty.git
287
def draw_screen(self) -> None: self.cmd.clear_screen() msg_lines: List[str] = [] if self.message: for line in self.message.splitlines(): msg_lines.extend(self.draw_long_text(line)) y = self.screen_size.rows - len(msg_lines) y = max(0, (y // 2) - 2) self.print(end='\r\n'*y) for line in msg_lines: if self.replacement_text in line: idx = line.find(self.replacement_text) x = wcswidth(line[:idx]) self.replacement_range = Range(x
30
302
draw_screen
367
0
14
51
python/ccxt/huobi.py
16,076
1.68.88 [ci skip]
ccxt
20
Python
159
huobi.py
def cancel_orders(self, ids, symbol=None, params={}): self.load_markets() marketType = None marketType, params = self.handle_market_type_and_params('cancelOrder', None, params) request = { # spot ----------------------------------------------------------- # 'order-ids': ids.jsoin(','), # max 50 # 'client-order-ids': ','.join(ids), # max 50 # contracts ------------------------------------------------------ # 'order_id': id, # comma separated, max 10 # 'client_order_id': clientOrderId, # comma separated, max 10 # 'contract_code': market['id'], # 'symbol': market['settleId'], } method = None if marketType == 'spot': clientOrderIds = self.safe_value_2(params, 'client-order-id', 'clientOrderId') clientOrderIds = self.safe_value_2(params, 'client-order-ids', 'clientOrderIds', clientOrderIds) if clientOrderIds is None: if isinstance(clientOrderIds, basestring): request['order-ids'] = ids else: request['order-ids'] = ','.join(ids) else: if isinstance(clientOrderIds, basestring): request['client-order-ids'] = clientOrderIds else: request['client-order-ids'] = ','.join(clientOrderIds) params = self.omit(params, ['client-order-id', 'client-order-ids', 'clientOrderId', 'clientOrderIds']) method = 'spotPrivatePostV1OrderOrdersBatchcancel' else: if symbol is None: raise ArgumentsRequired(self.id + ' cancelOrders() requires a symbol for ' + marketType + ' orders') market = self.market(symbol) request['contract_code'] = market['id'] if market['linear']: defaultMargin = 'cross' if market['future'] else 'isolated' marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin) if marginType == 'isolated': method = 'contractPrivatePostLinearSwapApiV1SwapCancel' elif marginType == 'cross': method = 'contractPrivatePostLinearSwapApiV1SwapCrossCancel' elif market['inverse']: if market['future']: method = 'contractPrivatePostApiV1ContractCancel' request['symbol'] = market['settleId'] elif market['swap']: method = 'contractPrivatePostSwapApiV1SwapCancel' else: raise NotSupported(self.id + ' cancelOrders() does not support ' + marketType + ' markets') clientOrderIds = self.safe_string_2(params, 'client_order_id', 'clientOrderId') clientOrderIds = self.safe_string_2(params, 'client_order_ids', 'clientOrderIds', clientOrderIds) if clientOrderIds is None: request['order_id'] = ','.join(ids) else: request['client_order_id'] = clientOrderIds params = self.omit(params, ['client_order_id', 'client_order_ids', 'clientOrderId', 'clientOrderIds']) response = getattr(self, method)(self.extend(request, params)) # # spot # # { # "status": "ok", # "data": { # "success": [ # "5983466" # ], # "failed": [ # { # "err-msg": "Incorrect order state", # "order-state": 7, # "order-id": "", # "err-code": "order-orderstate-error", # "client-order-id": "first" # }, # { # "err-msg": "Incorrect order state", # "order-state": 7, # "order-id": "", # "err-code": "order-orderstate-error", # "client-order-id": "second" # }, # { # "err-msg": "The record is not found.", # "order-id": "", # "err-code": "base-not-found", # "client-order-id": "third" # } # ] # } # } # # contracts # # { # "status": "ok", # "data": { # "errors": [ # { # "order_id": "769206471845261312", # "err_code": 1061, # "err_msg": "This order doesnt exist." # } # ], # "successes": "773120304138219520" # }, # "ts": 1604367997451 # } # return response
7a8a00ecacc1eb719ced2f95c06a472a8b55892c
379
https://github.com/ccxt/ccxt.git
2,109
def cancel_orders(self, ids, symbol=None, params={}): self.load_markets() marketType = None marketType, params = self.handle_market_type_and_params('cancelOrder', None, params) request = { # spot ----------------------------------------------------------- # 'order-ids': ids.jsoin(','), # max 50 # 'client-order-ids': ','.join(ids), # max 50 # contracts ------------------------------------------------------ # 'order_id': id, # comma separated, max 10 # 'client_order_id': clientOrderId, # comma separated, max 10 # 'contract_code': market['id'], # 'symbol': market['settleId'], } method = None if marketType == 'spot': clientOrderIds = self.safe_value_2(params, 'client-order-id', 'clientOrderId') clientOrderIds = self.safe_value_2(params, 'client-order-ids', 'clientOrderIds', clientOrderIds) if clientOrderIds is None: if isinstance(clientOrderIds, basestring): request['order-ids'] = ids else: request['order-ids'] = ','.join(ids) else: if isinstance(clientOrderIds, basestring): request['client-order-ids'] = clientOrderIds else: request['client-order-ids'] = ','.join(clientOrderIds) params = self.omit(params, ['client-order-id', 'client-order-ids', 'clientOrderId', 'clientOrderIds']) method = 'spotPrivatePostV1OrderOrdersBatchcancel' else: if symbol is None: raise ArgumentsRequired(self.id + ' cancelOrders() requires a symbol for ' + marketType + ' orders') market = self.market(symbol) request['contract_code'] = market['id'] if market['linear']: defaultMargin = 'cross' if market['future'] else 'isolated' marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin) if marginType == 'isolated': method = 'contractPrivatePostLinearSwapApiV1SwapCancel' elif marginType == 'cross': method = 'contractPrivatePostLinearSwapApiV1SwapCrossCancel' elif market['inverse']: if market['future']: method = 'contractPrivatePostApiV1ContractCancel' request['symbol'] = market['settleId'] elif market['swap']: method = 'contractPrivatePostSwapApiV1SwapCancel' else: raise NotSupported(self.id + ' cancelOrders() does not support ' + marketType + ' markets') clientOrderIds = self.safe_string_2(params, 'client_order_id', 'clientOrderId') clientOrderIds = self.safe_string_2(params, 'client_order_ids', 'clientOrderIds', clientOrderIds) if clientOrderIds is None: request['order_id'] = ','.join(ids) else: request['client_order_id'] = clientOrderIds params = self.omit(params, ['client_order_id', 'client_order_ids', 'clientOrderId', 'clientOrderIds']) response = getattr(self, method)(self.extend(request, params)) # # spot # # { # "status": "ok", # "data": { # "success": [ # "5983466" # ], # "failed": [ # { # "err-msg": "Incorrect order state", # "order-state": 7, # "order-id": "", # "err-code": "order-orderstate-error", # "client-order-id": "first" # }, # { # "err-msg": "Incorrect order state", # "order-state": 7, # "order-id": "", # "err-code": "order-orderstate-error", #
27
730
cancel_orders
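A minimal usage sketch for the `cancel_orders` implementation recorded above, assuming the ccxt package is installed. The credentials and order ids are hypothetical placeholders, and a real call would hit Huobi's live batch-cancel endpoint, so treat this as illustrative only; the response shape follows the sample documented inside the method.

import ccxt

exchange = ccxt.huobi({
    'apiKey': 'YOUR_API_KEY',   # hypothetical credentials
    'secret': 'YOUR_SECRET',
})

# Spot market: the order ids are joined with commas and sent to the
# batch-cancel endpoint (up to 50 per request, per the comments in the record).
result = exchange.cancel_orders(['5983466', '5983467'], 'BTC/USDT')
print(result['data'].get('success'), result['data'].get('failed'))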
58
0
3
30
zerver/tests/test_auth_backends.py
84,401
test_auth_backends: Extract external_auth_backends. Signed-off-by: Zixuan James Li <p359101898@gmail.com>
zulip
16
Python
51
test_auth_backends.py
def test_get_external_method_dicts_correctly_sorted(self) -> None: with self.settings( AUTHENTICATION_BACKENDS=( "zproject.backends.EmailAuthBackend", "zproject.backends.GitHubAuthBackend", "zproject.backends.GoogleAuthBackend", "zproject.backends.ZulipRemoteUserBackend", "zproject.backends.SAMLAuthBackend", "zproject.backends.AzureADAuthBackend", ), ): external_auth_methods = get_external_method_dicts() external_auth_backends: List[Type[ExternalAuthMethod]] = [ ZulipRemoteUserBackend, GitHubAuthBackend, AzureADAuthBackend, GoogleAuthBackend, ] # First backends in the list should be SAML: self.assertIn("saml:", external_auth_methods[0]["name"]) self.assertEqual( [social_backend["name"] for social_backend in external_auth_methods[1:]], [ social_backend.name for social_backend in sorted( external_auth_backends, key=lambda x: x.sort_order, reverse=True, ) ], )
51df4031ac45ae6fcc6d2ccea22eed897116c582
117
https://github.com/zulip/zulip.git
491
def test_get_external_method_dicts_correctly_sorted(self) -> None: with self.settings( AUTHENTICATION_BACKENDS=( "zproject.backends.EmailAuthBackend", "zproject.backends.GitHubAuthBackend", "zproject.backends.GoogleAuthBackend", "zproject.backends.ZulipRemoteUserBackend", "zproject.backends.SAMLAuthBackend", "zproject.backends.AzureADAuthBackend", ), ): external_auth_methods = get_external_method_dicts() external_auth_backends: List[Type[ExternalAuthMethod]] = [ ZulipRemoteUserBackend, GitHubAuthBackend, AzureADAuthBackend,
23
185
test_get_external_method_dicts_correctly_sorted
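The assertion in the test above depends on ordering backend classes by their `sort_order` attribute, highest first. A standalone sketch of that sorting pattern with invented backend classes (the names and numbers are illustrative, not Zulip's real values):

class ExternalAuthMethod:
    sort_order = 0
    name = "base"

class GitHubBackend(ExternalAuthMethod):
    sort_order = 30   # illustrative value
    name = "github"

class GoogleBackend(ExternalAuthMethod):
    sort_order = 20
    name = "google"

class RemoteUserBackend(ExternalAuthMethod):
    sort_order = 10
    name = "remoteuser"

backends = [GoogleBackend, RemoteUserBackend, GitHubBackend]
# Higher sort_order first, mirroring key=lambda x: x.sort_order, reverse=True
ordered = sorted(backends, key=lambda b: b.sort_order, reverse=True)
print([b.name for b in ordered])   # ['github', 'google', 'remoteuser']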
17
0
1
12
homeassistant/components/tado/sensor.py
306,498
Improve entity type hints [t] (#77883)
core
13
Python
16
sensor.py
async def async_added_to_hass(self) -> None: self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_TADO_UPDATE_RECEIVED.format( self._tado.home_id, "weather", "data" ), self._async_update_callback, ) ) self._async_update_home_data()
458001a06e7678273ea43c33e55b833adadced9e
44
https://github.com/home-assistant/core.git
146
async def async_added_to_hass(self) -> None: self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_TADO_UPDATE_RECEIVED.format( self._tado.home_id, "weather", "data" ), self._async_update_callback, ) ) self._async_update_home_data()
11
74
async_added_to_hass
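The `async_added_to_hass` record above registers a dispatcher callback and ties its lifetime to the entity via `async_on_remove`. A framework-free sketch of the same connect-and-unsubscribe idea, where the dispatcher is a plain toy class rather than Home Assistant's implementation:

class Dispatcher:
    """Toy stand-in for Home Assistant's dispatcher: connect() returns an unsubscribe callable."""
    def __init__(self):
        self._listeners = {}

    def connect(self, signal, callback):
        self._listeners.setdefault(signal, []).append(callback)
        return lambda: self._listeners[signal].remove(callback)

    def send(self, signal):
        for callback in list(self._listeners.get(signal, [])):
            callback()

dispatcher = Dispatcher()
updates = []

# Same shape as the record: register the callback, keep the unsubscribe handle,
# and call it when the entity is removed so no stale callbacks linger.
unsubscribe = dispatcher.connect("tado_update_weather", lambda: updates.append("refresh"))
dispatcher.send("tado_update_weather")
unsubscribe()
dispatcher.send("tado_update_weather")   # not delivered any more
print(updates)                           # ['refresh']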
11
0
2
3
.venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py
63,218
upd; format
transferlearning
9
Python
11
__init__.py
def obtain(self, requirement, installer=None): if installer is not None: return installer(requirement)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
23
https://github.com/jindongwang/transferlearning.git
36
def obtain(self, requirement, installer=None): if installer is not None: return installer(requirement)
4
37
obtain
48
0
4
14
dashboard/modules/job/common.py
145,238
[jobs] Rename JobData -> JobInfo (#22499) `JobData` could be confused with the actual output data of a job; `JobInfo` makes it clearer that this is status information + metadata.
ray
14
Python
38
common.py
def get_all_jobs(self) -> Dict[str, JobInfo]: raw_job_ids_with_prefixes = _internal_kv_list( self.JOB_DATA_KEY_PREFIX, namespace=ray_constants.KV_NAMESPACE_JOB ) job_ids_with_prefixes = [ job_id.decode() for job_id in raw_job_ids_with_prefixes ] job_ids = [] for job_id_with_prefix in job_ids_with_prefixes: assert job_id_with_prefix.startswith( self.JOB_DATA_KEY_PREFIX ), "Unexpected format for internal_kv key for Job submission" job_ids.append(job_id_with_prefix[len(self.JOB_DATA_KEY_PREFIX) :]) return {job_id: self.get_info(job_id) for job_id in job_ids}
58e5f0140d247059ca45b249446614929930c126
89
https://github.com/ray-project/ray.git
166
def get_all_jobs(self) -> Dict[str, JobInfo]: raw_job_ids_with_prefixes = _internal_kv_list( self.JOB_DATA_KEY_PREFIX, namespace=ray_constants.KV_NAMESPACE_JOB ) job_ids_with_prefixe
20
137
get_all_jobs
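The `get_all_jobs` record lists keys under a fixed prefix in an internal key-value store and strips the prefix to recover job ids. A dictionary-backed sketch of that prefix-listing pattern (the store contents and prefix value are made up):

JOB_DATA_KEY_PREFIX = "_ray_internal_job_info_"   # illustrative prefix

# stand-in for the internal KV store
_kv = {
    "_ray_internal_job_info_raysubmit_123": b"SUCCEEDED",
    "_ray_internal_job_info_raysubmit_456": b"RUNNING",
    "unrelated_key": b"ignore me",
}

def kv_list(prefix):
    # mimics _internal_kv_list: return raw (bytes) keys that start with the prefix
    return [key.encode() for key in _kv if key.startswith(prefix)]

def get_all_job_ids():
    job_ids = []
    for raw_key in kv_list(JOB_DATA_KEY_PREFIX):
        key = raw_key.decode()
        assert key.startswith(JOB_DATA_KEY_PREFIX)
        job_ids.append(key[len(JOB_DATA_KEY_PREFIX):])   # drop the prefix
    return job_ids

print(get_all_job_ids())   # ['raysubmit_123', 'raysubmit_456']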
21
1
1
9
autokeras/tasks/image_test.py
175,895
moving tests (#1664) Co-authored-by: Haifeng Jin <haifeng-jin@users.noreply.github.com>
autokeras
12
Python
19
image_test.py
def test_img_seg_fit_call_auto_model_fit(fit, tmp_path): auto_model = ak.tasks.image.ImageSegmenter( directory=tmp_path, seed=test_utils.SEED ) auto_model.fit( x=test_utils.generate_data(num_instances=100, shape=(32, 32, 3)), y=test_utils.generate_data(num_instances=100, shape=(32, 32)), ) assert fit.is_called @mock.patch("autokeras.AutoModel.fit")
b349e06f3b6e80ba527347b2b0463bcc403ae8c5
@mock.patch("autokeras.AutoModel.fit")
76
https://github.com/keras-team/autokeras.git
55
def test_img_seg_fit_call_auto_model_fit(fit, tmp_path): auto_model = ak.tasks.image.ImageSegmenter( directory=tmp_path, seed=test_utils.SEED ) auto_model.fit( x=test_utils.generate_data(num_instances=100, shape=(32, 32, 3)), y=test_utils.generate_data(num_instances=100, shape=(32, 32)), ) assert fit.is_called
20
124
test_img_seg_fit_call_auto_model_fit
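The AutoKeras test above patches `AutoModel.fit` and only checks that it was invoked. A self-contained sketch of the same patching pattern with dummy classes instead of AutoKeras; as an aside, on a plain MagicMock an arbitrary attribute such as `is_called` is itself a mock and therefore always truthy, so `called` and `assert_called_once()` are the attributes that genuinely verify the call:

from unittest import mock

class AutoModel:
    def fit(self, x=None, y=None):
        raise RuntimeError("expensive training we never run in a unit test")

class ImageSegmenter:
    def __init__(self):
        self._auto_model = AutoModel()

    def fit(self, x=None, y=None):
        # delegate to the underlying AutoModel, like the AutoKeras task classes do
        self._auto_model.fit(x=x, y=y)

@mock.patch.object(AutoModel, "fit")
def test_fit_delegates_to_auto_model(fit):
    ImageSegmenter().fit(x=[1, 2, 3], y=[0, 1, 0])
    assert fit.called            # the check the original `fit.is_called` was aiming for
    fit.assert_called_once()

test_fit_delegates_to_auto_model()
print("ok")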
130
0
2
54
tests/integration_tests/test_visualization.py
6,163
Renamed "training" to "trainer" and "numerical" to "number" (#1743)
ludwig
11
Python
99
test_visualization.py
def test_visualization_binary_threshold_vs_metric_output_saved(csv_filename): input_features = [ text_feature(vocab_size=10, min_len=1, encoder="stacked_cnn"), number_feature(), category_feature(vocab_size=10, embedding_size=5), set_feature(), sequence_feature(vocab_size=10, max_len=10, encoder="embed"), ] output_features = [category_feature(vocab_size=4, reduce_input="sum")] # Generate test data rel_path = generate_data(input_features, output_features, csv_filename) input_features[0]["encoder"] = "parallel_cnn" exp_dir_name = run_experiment_with_visualization(input_features, output_features, dataset=rel_path) vis_output_pattern_pdf = os.path.join(exp_dir_name, "*.pdf") vis_output_pattern_png = os.path.join(exp_dir_name, "*.png") output_feature_name = get_output_feature_name(exp_dir_name) probability = os.path.join(exp_dir_name, PREDICTIONS_PARQUET_FILE_NAME) experiment_source_data_name = csv_filename.split(".")[0] ground_truth = experiment_source_data_name + ".csv" split_file = experiment_source_data_name + ".split.csv" test_cmd_pdf = [ "python", "-m", "ludwig.visualize", "--visualization", "binary_threshold_vs_metric", "--positive_label", "2", "--metrics", "accuracy", "--ground_truth", ground_truth, "--output_feature_name", output_feature_name, "--split_file", split_file, "--ground_truth_metadata", exp_dir_name + "/model/training_set_metadata.json", "--probabilities", probability, probability, "--model_names", "Model1", "Model2", "-od", exp_dir_name, ] test_cmd_png = test_cmd_pdf.copy() + ["-ff", "png"] commands = [test_cmd_pdf, test_cmd_png] vis_patterns = [vis_output_pattern_pdf, vis_output_pattern_png] for command, viz_pattern in zip(commands, vis_patterns): result = subprocess.run(command) figure_cnt = glob.glob(viz_pattern) assert 0 == result.returncode assert 1 == len(figure_cnt)
6ec371f2201c8b0a13b5719d59a19da94aaa09de
291
https://github.com/ludwig-ai/ludwig.git
431
def test_visualization_binary_threshold_vs_metric_output_saved(csv_filename): input_features = [ text_feature(vocab_size=10, min_len=1, encoder="stacked_cnn"), number_feature(), category_feature(vocab_size=10, embedding_size=5), set_feature(), sequence_feature(vocab_size=10, max_len=10, encoder="embed"), ] output_features = [category_feature(vocab_size=4, reduce_input="sum")] # Generate test data rel_path = generate_data(input_features, output_features, csv_filename) input_features[0]["encoder"] = "parallel_cnn" exp_dir_name = run_experiment_with_visualization(input_features, output_features, dataset=rel_path) vis_output_pattern_pdf = os.path.join(exp_dir_name, "*.pdf") vis_output_pattern_png = os.path.join(exp_dir_name, "*.png") output_feature_name = get_output_feature_name(exp_dir_name) probability = os.path.join(exp_dir_name, PREDICTIONS_PARQUET_FILE_NAME) experiment_source_data_name = csv_filename.split(".")[0] ground_truth = experiment_source_data_name + ".csv" split_file =
48
475
test_visualization_binary_threshold_vs_metric_output_saved
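The Ludwig test shells out to a CLI, checks the return code, and counts the produced figures with `glob`. A trimmed-down sketch of that subprocess-plus-glob pattern, using a `python -c` one-liner as a stand-in command so it runs anywhere:

import glob
import os
import subprocess
import sys
import tempfile

with tempfile.TemporaryDirectory() as exp_dir_name:
    # stand-in for the `python -m ludwig.visualize ...` invocation: it just writes
    # one fake .pdf and one fake .png into the output directory
    command = [
        sys.executable, "-c",
        "import pathlib, sys; d = pathlib.Path(sys.argv[1]); "
        "(d / 'plot.pdf').write_text('pdf'); (d / 'plot.png').write_text('png')",
        exp_dir_name,
    ]
    result = subprocess.run(command)
    assert result.returncode == 0

    for pattern in (os.path.join(exp_dir_name, "*.pdf"), os.path.join(exp_dir_name, "*.png")):
        figures = glob.glob(pattern)
        assert len(figures) == 1   # one figure per requested format, as in the original check
    print("both output formats were produced")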
29
0
2
7
wagtail/contrib/settings/tests/generic/test_model.py
78,285
Add generic settings to compliment site-specific settings (#8327)
wagtail
14
Python
23
test_model.py
def test_get_page_url_returns_empty_string_if_attribute_value_not_a_page(self): settings = self._create_importantpagesgenericsetting_object() for value in (None, self.default_site): with self.subTest(attribute_value=value): settings.test_attribute = value # when called directly self.assertEqual(settings.get_page_url("test_attribute"), "") # when called indirectly via shortcut self.assertEqual(settings.page_url.test_attribute, "")
d967eccef28ce47f60d26be1c28f2d83a25f40b0
63
https://github.com/wagtail/wagtail.git
128
def test_get_page_url_returns_empty_string_if_attribute_value_not_a_page(self): settings = self._create_importantpagesgenericsetting_object() for value in (None, self.default_site): with self.subTest(attribute_value=value): settings.test
12
108
test_get_page_url_returns_empty_string_if_attribute_value_not_a_page
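The Wagtail test loops over candidate values inside `self.subTest` so each value is reported as its own sub-test on failure. A minimal standalone `unittest` sketch of the same structure; the settings object here is a stub, not Wagtail's model:

import unittest

class FakeSettings:
    test_attribute = None

    def get_page_url(self, name):
        value = getattr(self, name)
        # only objects that expose a .url produce a URL; everything else yields ""
        return getattr(value, "url", "") if value is not None else ""

class PageUrlTests(unittest.TestCase):
    def test_returns_empty_string_for_non_pages(self):
        settings = FakeSettings()
        for value in (None, object()):          # stand-ins for None and a Site object
            with self.subTest(attribute_value=value):
                settings.test_attribute = value
                self.assertEqual(settings.get_page_url("test_attribute"), "")

unittest.main(argv=["example"], exit=False)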
121
0
12
34
pandas/io/excel/_odfreader.py
171,315
REF change to_datetime -> Timestamp for in odf reader (#49712) change to_datetime -> Timestamp for single value Co-authored-by: MarcoGorelli <>
pandas
15
Python
65
_odfreader.py
def _get_cell_value(self, cell) -> Scalar | NaTType: from odf.namespaces import OFFICENS if str(cell) == "#N/A": return np.nan cell_type = cell.attributes.get((OFFICENS, "value-type")) if cell_type == "boolean": if str(cell) == "TRUE": return True return False if cell_type is None: return self.empty_value elif cell_type == "float": # GH5394 cell_value = float(cell.attributes.get((OFFICENS, "value"))) val = int(cell_value) if val == cell_value: return val return cell_value elif cell_type == "percentage": cell_value = cell.attributes.get((OFFICENS, "value")) return float(cell_value) elif cell_type == "string": return self._get_cell_string_value(cell) elif cell_type == "currency": cell_value = cell.attributes.get((OFFICENS, "value")) return float(cell_value) elif cell_type == "date": cell_value = cell.attributes.get((OFFICENS, "date-value")) return pd.Timestamp(cell_value) elif cell_type == "time": stamp = pd.Timestamp(str(cell)) # cast needed here because Scalar doesn't include datetime.time return cast(Scalar, stamp.time()) else: self.close() raise ValueError(f"Unrecognized type {cell_type}")
86f182829d2cfe2f4c380d7f2ecd6ea27d6e0f1d
232
https://github.com/pandas-dev/pandas.git
465
def _get_cell_value(self, cell) -> Scalar | NaTType: from odf.namespaces import OFFICENS if str(cell) == "#N/A": return np.nan cell_type = cell.attributes.get((OFFICENS, "value-type")) if cell_type == "boolean": if str(cell) == "TRUE": return True return False if cell_type is N
27
398
_get_cell_value
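One detail worth noting in `_get_cell_value` is the float branch: ODF stores every number as a float, so integral values are converted back to `int` (the GH5394 behaviour). A tiny sketch of just that normalization:

def normalize_odf_float(raw: str):
    # ODF's value attribute is a decimal string; an integral "3" arrives as 3.0
    cell_value = float(raw)
    val = int(cell_value)
    if val == cell_value:      # integral float -> plain int
        return val
    return cell_value

print(normalize_odf_float("3"))      # 3 (int)
print(normalize_odf_float("3.25"))   # 3.25 (float)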
29
1
1
7
airbyte-integrations/connectors/source-github/unit_tests/test_stream.py
4,331
🐛 Source Github: test coverage more than 90% (#10967) Signed-off-by: Sergey Chvalyuk <grubberr@gmail.com>
airbyte
11
Python
24
test_stream.py
def test_stream_organizations_read(): organization_args = {"organizations": ["org1", "org2"]} stream = Organizations(**organization_args) responses.add("GET", "https://api.github.com/orgs/org1", json={"id": 1}) responses.add("GET", "https://api.github.com/orgs/org2", json={"id": 2}) records = read_full_refresh(stream) assert records == [{"id": 1}, {"id": 2}] @responses.activate
0f475ce6ff95c4fc1e6793edd8d88861c1e8f60a
@responses.activate
76
https://github.com/airbytehq/airbyte.git
45
def test_stream_organizations_read(): organization_args = {"organizations": ["org1", "org2"]} stream = Organizations(**organization_args) responses.add("GET", "https://api.github.com/orgs/org1", json={"id": 1}) responses.add("GET", "https://api.github.com/or
10
144
test_stream_organizations_read
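The Airbyte test uses the `responses` library to stub the GitHub REST endpoints. A self-contained sketch of the same stubbing, hitting the mocked URLs with plain `requests` instead of the Airbyte stream; it assumes `responses` and `requests` are installed and performs no real network traffic:

import requests
import responses

@responses.activate
def fetch_orgs(org_names):
    for name in org_names:
        # each org gets its own canned JSON payload
        responses.add("GET", f"https://api.github.com/orgs/{name}", json={"login": name})
    return [requests.get(f"https://api.github.com/orgs/{n}").json() for n in org_names]

print(fetch_orgs(["org1", "org2"]))   # [{'login': 'org1'}, {'login': 'org2'}]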
10
0
1
6
tests/federation/transport/test_server.py
246,319
Tests: replace mocked Authenticator with the real thing (#11913) If we prepopulate the test homeserver with a key for a remote homeserver, we can make federation requests to it without having to stub out the authenticator. This has two advantages: * means that what we are testing is closer to reality (ie, we now have complete tests for the incoming-request-authorisation flow) * some tests require that other objects be signed by the remote server (eg, the event in `/send_join`), and doing that would require a whole separate set of mocking out. It's much simpler just to use real keys.
synapse
9
Python
10
test_server.py
def test_open_public_room_list_over_federation(self): channel = self.make_signed_federation_request( "GET", "/_matrix/federation/v1/publicRooms", ) self.assertEquals(200, channel.code)
c3db7a0b59d48b8872bc24096f9a2467ef35f703
27
https://github.com/matrix-org/synapse.git
60
def test_open_public_room_list_over_federation(self): channel = self.make_signed_federation_request( "GET", "/
6
48
test_open_public_room_list_over_federation
12
0
1
4
tests/pytests/unit/utils/win_dacl/test_get_name.py
215,739
Add tests, migrate some tests to pytest
salt
10
Python
9
test_get_name.py
def test_get_name_normal_name(): result = salt.utils.win_dacl.get_name("Administrators") expected = "Administrators" assert result == expected
3bb43882e727b1d36abe2e501759c9c5e9048ecf
24
https://github.com/saltstack/salt.git
24
def test_get_name_normal_name(): result = salt.utils.win_dacl.get_name("Administrators") expected = "Administrators" assert result == expected
7
46
test_get_name_normal_name
97
0
1
17
lib/matplotlib/tests/test_axes.py
109,648
Curved polar errorbars - uses _interpolation_steps - prefers transform MarkerStyle in init over _transform property - adjusted what's new - added more tests for overlapping, asymmetric and long errorbars - combine all tests to a single figure - remove overlapping since it does not work the same on all platforms - rework test figure, add overlapping, might work by avoiding grid - update what's new with image and link to example
matplotlib
11
Python
72
test_axes.py
def test_mixed_errorbar_polar_caps(): fig = plt.figure() ax = plt.subplot(111, projection='polar') # symmetric errorbars th_sym = [1, 2, 3] r_sym = [0.9]*3 ax.errorbar(th_sym, r_sym, xerr=0.35, yerr=0.2, fmt="o") # long errorbars th_long = [np.pi/2 + .1, np.pi + .1] r_long = [1.8, 2.2] ax.errorbar(th_long, r_long, xerr=0.8 * np.pi, yerr=0.15, fmt="o") # asymmetric errorbars th_asym = [4*np.pi/3 + .1, 5*np.pi/3 + .1, 2*np.pi-0.1] r_asym = [1.1]*3 xerr = [[.3, .3, .2], [.2, .3, .3]] yerr = [[.35, .5, .5], [.5, .35, .5]] ax.errorbar(th_asym, r_asym, xerr=xerr, yerr=yerr, fmt="o") # overlapping errorbar th_over = [2.1] r_over = [3.1] ax.errorbar(th_over, r_over, xerr=10, yerr=.2, fmt="o")
907f78dbf959c0609ab484c59e840eea3eafee31
273
https://github.com/matplotlib/matplotlib.git
160
def test_mixed_errorbar_polar_caps(): fig = plt.figure() ax = plt.subplot(111, projection='polar') # symmetric errorbars th_sym = [1, 2, 3] r_sym = [0.9]*3 ax.errorbar(th_sym, r_sym, xerr=0.35, yerr=0.2, fmt="o") # long errorbars th_long = [np.pi/2 + .1, np.pi + .1] r_long = [1.8, 2.2] ax.errorbar(th_long, r_long, xerr=0.8 * np.pi, yerr=0.15, fmt="o") # asymmetric errorbars th_asym = [4*np.pi/3 + .1, 5*np.pi/3 + .1, 2*np.pi-0.1] r
21
348
test_mixed_errorbar_polar_caps
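A pared-down sketch of the polar errorbar call exercised by the test above; it covers only the symmetric case and saves the figure to disk instead of comparing against a baseline image (matplotlib is assumed to be installed):

import matplotlib
matplotlib.use("Agg")            # headless backend so the script runs anywhere
import matplotlib.pyplot as plt

fig = plt.figure()
ax = plt.subplot(111, projection="polar")

theta = [1, 2, 3]                # angles in radians
r = [0.9] * 3
# xerr is an angular error, yerr a radial one; both are drawn as errorbar caps
ax.errorbar(theta, r, xerr=0.35, yerr=0.2, fmt="o")

fig.savefig("polar_errorbars.png")
print("wrote polar_errorbars.png")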
45
0
1
20
tests/components/risco/util.py
317,749
Upgrade pyrisco to 0.5.0 (#75648) * Upgrade to pyrisco 0.4.0 * Parametrized error tests in config flow * Inline error parameters * Switch to RiscoCloud
core
14
Python
30
util.py
def two_zone_alarm(): zone_mocks = {0: _zone_mock(), 1: _zone_mock()} alarm_mock = MagicMock() with patch.object( zone_mocks[0], "id", new_callable=PropertyMock(return_value=0) ), patch.object( zone_mocks[0], "name", new_callable=PropertyMock(return_value="Zone 0") ), patch.object( zone_mocks[1], "id", new_callable=PropertyMock(return_value=1) ), patch.object( zone_mocks[1], "name", new_callable=PropertyMock(return_value="Zone 1") ), patch.object( alarm_mock, "zones", new_callable=PropertyMock(return_value=zone_mocks), ), patch( "homeassistant.components.risco.RiscoCloud.get_state", return_value=alarm_mock, ): yield alarm_mock
7cf2d1759dde088105f77ca61dba8e58e3474b83
141
https://github.com/home-assistant/core.git
145
def two_zone_alarm(): zone_mocks = {0: _zone_mock(), 1: _zone_mock()} alarm_mock = MagicMock() with patch.object( zone_mocks[0], "id", new_callable=PropertyMock(return_value=0) ), patch.object( zone_mocks[0], "name", new_callable=PropertyMock(return_value="Zone 0") ), patch.object( zone_mocks[1], "id", new_callable=PropertyMock(return_value=1) ), patch.object( zone_mocks[1], "name", new_callable=PropertyMock(return_value="Zone 1") ), patch.object( alarm_mock, "zones", new_callable=PropertyMock(return_value=zone_mocks), ), patch( "homeassistant.components.risco.RiscoCloud.get_state", return_v
10
230
two_zone_alarm
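The fixture above fakes read-only attributes on mocked zones with `PropertyMock`. A minimal sketch of that technique in its canonical form, patching the property on a class and setting `return_value`, which is the effect the inline `PropertyMock(return_value=...)` instances aim for:

from unittest.mock import PropertyMock, patch

class Zone:
    @property
    def name(self):
        raise RuntimeError("talks to the real alarm panel")

with patch.object(Zone, "name", new_callable=PropertyMock) as mock_name:
    mock_name.return_value = "Zone 0"
    print(Zone().name)           # "Zone 0", no panel access happens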
150
0
14
41
networkx/readwrite/nx_shp.py
176,450
Minor improvements from general code readthrough (#5414) * Add deprecated directive to reversed docstring. * Add missing dep directives to shpfiles. * Remove defn of INF sentinel. * typo. * str -> comment in forloop. * STY: appropriate casing for var name.
networkx
20
Python
109
nx_shp.py
def read_shp(path, simplify=True, geom_attrs=True, strict=True): msg = ( "read_shp is deprecated and will be removed in 3.0." "See https://networkx.org/documentation/latest/auto_examples/index.html#geospatial." ) warnings.warn(msg, DeprecationWarning, stacklevel=2) try: from osgeo import ogr except ImportError as err: raise ImportError("read_shp requires OGR: http://www.gdal.org/") from err if not isinstance(path, str): return net = nx.DiGraph() shp = ogr.Open(path) if shp is None: raise RuntimeError(f"Unable to open {path}") for lyr in shp: fields = [x.GetName() for x in lyr.schema] for f in lyr: g = f.geometry() if g is None: if strict: raise nx.NetworkXError("Bad data: feature missing geometry") else: continue flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields] attributes = dict(zip(fields, flddata)) attributes["ShpName"] = lyr.GetName() # Note: Using layer level geometry type if g.GetGeometryType() == ogr.wkbPoint: net.add_node((g.GetPoint_2D(0)), **attributes) elif g.GetGeometryType() in (ogr.wkbLineString, ogr.wkbMultiLineString): for edge in edges_from_line(g, attributes, simplify, geom_attrs): e1, e2, attr = edge net.add_edge(e1, e2) net[e1][e2].update(attr) else: if strict: raise nx.NetworkXError( f"GeometryType {g.GetGeometryType()} not supported" ) return net
cc1db275efc709cb964ce88abbfa877798d58c10
280
https://github.com/networkx/networkx.git
573
def read_shp(path, simplify=True, geom_attrs=True, strict=True): msg = ( "read_shp is deprecated and will be removed in 3.0." "See https://networkx.org/documentation/latest/auto_examples/index.html#geospatial." ) warnings.warn(msg, DeprecationWarning, stacklevel=2) try: from osgeo import ogr except ImportError as err: raise ImportError("read_shp requires OGR: http://www.gdal.org/") from e
50
463
read_shp
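`read_shp` opens with the standard deprecation pattern: warn with `DeprecationWarning` and `stacklevel=2` so the warning is attributed to the caller rather than to the deprecated function itself. A minimal sketch of just that pattern:

import warnings

def read_shp(path):
    warnings.warn(
        "read_shp is deprecated and will be removed in 3.0.",
        DeprecationWarning,
        stacklevel=2,   # point the warning at the caller, not at this wrapper
    )
    return None         # the real OGR work would happen here

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    read_shp("roads.shp")
print(caught[0].category.__name__, "-", caught[0].message)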
34
0
1
8
seaborn/tests/_core/test_plot.py
40,630
Implement prototype Plot.pair behavior
seaborn
12
Python
26
test_plot.py
def test_col_wrapping(self): cols = list("abcd") wrap = 3 p = Plot().facet(col=cols, wrap=wrap).plot() gridspec = p._figure.axes[0].get_gridspec() assert len(p._figure.axes) == 4 assert gridspec.ncols == 3 assert gridspec.nrows == 2 # TODO test axis labels and titles
9cfdd4c5ed60562f3429386d25758476c5c57998
71
https://github.com/mwaskom/seaborn.git
89
def test_col_wrapping(self): cols = list("abcd") wrap = 3 p = Plot().facet(col=cols, wrap=wrap).plot() gridspec = p._figure.axes[0].get_gridspec() assert len(p._figure.axes) == 4 assert gridspec.ncols == 3 assert gridspec.nrows == 2 # TODO test axis labels and titles
17
116
test_col_wrapping
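The wrapping arithmetic asserted above (4 facet columns wrapped at 3 giving a 2-row by 3-column grid) follows from simple ceiling division. A small sketch of that calculation, independent of seaborn's internals:

import math

def wrapped_grid(n_facets, wrap):
    # the number of columns is capped at `wrap`; rows grow as needed
    ncols = min(n_facets, wrap)
    nrows = math.ceil(n_facets / wrap)
    return nrows, ncols

print(wrapped_grid(4, 3))   # (2, 3): 2 rows, 3 columns, last cell left empty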
36
0
1
10
tests/providers/ssh/operators/test_ssh.py
44,472
Refactor SSH tests to not use SSH server in operator tests (#21326) This required a slight refactor to the SSHOperator (moving `exec_ssh_client_command` "down" into the Hook) but the SSH _Operator_ tests now just use stubbing, and the only place that connects to a real SSH server is the one test of `test_exec_ssh_client_command` in SSHHook. This is both better structured, and hopefully produces fewer (or ideally no) random failures in our tests
airflow
12
Python
34
test_ssh.py
def test_command_errored(self): # Test that run_ssh_client_command works on invalid commands command = "not_a_real_command" task = SSHOperator( task_id="test", ssh_hook=self.hook, command=command, ) self.exec_ssh_client_command.return_value = (1, b'', b'Error here') with pytest.raises(AirflowException, match=f"error running cmd: {command}, error: Error here"): task.execute(None)
ab762a5a8ae147ae33500ee3c7e7a73d25d03ad7
61
https://github.com/apache/airflow.git
121
def test_command_errored(self): # Test that run_ssh_client_command works on invalid commands command = "not_a_real_command" task = SSHOperator( task_id="test", ssh_hook=self.hook, command=command, ) self.exec_ssh_client_comman
15
104
test_command_errored
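The Airflow test stubs the hook-level command execution and asserts on the raised message. A framework-free sketch of the same idea, stubbing the low-level call and checking that a non-zero exit status surfaces as an exception; the operator and exception classes here are made up, not Airflow's:

from unittest.mock import MagicMock
import pytest   # assumed available, as in the original test

class CommandFailed(Exception):
    pass

class SSHOperator:
    def __init__(self, hook, command):
        self.hook = hook
        self.command = command

    def execute(self):
        exit_status, _stdout, stderr = self.hook.exec_ssh_client_command(self.command)
        if exit_status != 0:
            raise CommandFailed(f"error running cmd: {self.command}, error: {stderr.decode()}")

def test_command_errored():
    hook = MagicMock()
    hook.exec_ssh_client_command.return_value = (1, b"", b"Error here")
    task = SSHOperator(hook, "not_a_real_command")
    with pytest.raises(CommandFailed, match="error running cmd: not_a_real_command, error: Error here"):
        task.execute()

test_command_errored()
print("ok")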
25
0
2
22
release/nightly_tests/dataset/pipelined_training.py
145,168
Round robin during spread scheduling (#21303) - Separate spread scheduling and default hybrid scheduling (i.e. SpreadScheduling != HybridScheduling(threshold=0)): they are already separated in the API layer and they have different end goals, so it makes sense to separate their implementations and evolve them independently. - Simple round robin for spread scheduling: this is just a starting implementation, can be optimized later. - Prefer not to spill back tasks that are waiting for args since the pull is already in progress.
ray
13
Python
24
pipelined_training.py
def create_dataset(files, num_workers=4, epochs=50, num_windows=1): if num_windows > 1: num_rows = ray.data.read_parquet( files ).count() # This should only read Parquet metadata. file_splits = np.array_split(files, num_windows)
baa14d695aafd0abb817026ddd4b4182f68f6b61
129
https://github.com/ray-project/ray.git
60
def create_dataset(files, num_workers=4, epochs=50, num_windows=1): if num_windows > 1: num_rows = ray.data.read_parquet( files ).count() # This should only read Parquet metadata. file_splits = np.array_split(files, num_windows)
13
71
create_dataset
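The windowing in `create_dataset` hinges on `np.array_split`, which divides the file list into `num_windows` nearly equal chunks even when the count does not divide evenly. A small sketch of just that step (the file names are invented):

import numpy as np

files = [f"data_{i:03d}.parquet" for i in range(10)]   # hypothetical file names
num_windows = 3

file_splits = np.array_split(files, num_windows)
for i, split in enumerate(file_splits):
    print(f"window {i}: {list(split)}")
# window 0 gets 4 files, windows 1 and 2 get 3 each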
19
0
1
4
tests/freqai/test_freqai_datakitchen.py
151,313
Fix arm test failure
freqtrade
8
Python
19
test_freqai_datakitchen.py
def test_use_DBSCAN_to_remove_outliers(mocker, freqai_conf, caplog): freqai = make_data_dictionary(mocker, freqai_conf) # freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 1}) freqai.dk.use_DBSCAN_to_remove_outliers(predict=False) assert log_has_re(r"DBSCAN found eps of 2\.3\d\.", caplog)
041258a5499534fae6de843a7b9d10dde02c7659
35
https://github.com/freqtrade/freqtrade.git
30
def test_use_DBSCAN_to_remove_outliers(mocker, freqai_conf, caplog): freqai = make_data_dictionary(mocker, freqai_conf) # freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 1}) freqai.dk.use_DBSCAN_to_remove_outliers(predict=False) assert log_has_re(r"DBSCAN found eps of 2\.
10
54
test_use_DBSCAN_to_remove_outliers
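The freqtrade test exercises DBSCAN-based outlier removal. A standalone sketch of the underlying idea using scikit-learn directly: points labelled -1 by DBSCAN are treated as outliers and dropped. The eps and min_samples values here are arbitrary, not freqtrade's auto-tuned ones:

import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(42)
inliers = rng.normal(0, 0.5, size=(100, 2))
outliers = np.array([[8.0, 8.0], [-9.0, 7.5]])   # obviously far from the main cluster
X = np.vstack([inliers, outliers])

labels = DBSCAN(eps=1.0, min_samples=5).fit_predict(X)
mask = labels != -1                               # -1 marks noise, i.e. outliers
print(f"dropped {np.sum(~mask)} of {len(X)} points as outliers")
filtered = X[mask]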
30
1
5
7
python/ray/tests/perf_integration_tests/test_perf_integration.py
131,150
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
15
Python
19
test_perf_integration.py
def warmup(): x = np.zeros(10 ** 6, dtype=np.uint8) for _ in range(5): for _ in range(5): ray.put(x) for _ in range(5): ray.get([dummy_task.remote(0) for _ in range(1000)]) @pytest.mark.benchmark @pytest.mark.parametrize("num_tasks", num_tasks_submitted, ids=num_tasks_ids)
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
@pytest.mark.benchmark @pytest.mark.parametrize("num_tasks", num_tasks_submitted, ids=num_tasks_ids)
70
https://github.com/ray-project/ray.git
69
def warmup(): x = np.zeros(10 ** 6, dtype=np.uint8) for _ in range(5): for _ in range(5): ray.put(x) for _ in range(5): ray.ge
20
142
warmup
24
0
3
8
homeassistant/components/zha/climate.py
290,796
Adjust type hints for ClimateEntityFeature (#82206)
core
9
Python
19
climate.py
def supported_features(self) -> ClimateEntityFeature: features = self._supported_flags if HVACMode.HEAT_COOL in self.hvac_modes: features |= ClimateEntityFeature.TARGET_TEMPERATURE_RANGE if self._fan is not None: self._supported_flags |= ClimateEntityFeature.FAN_MODE return features
39ac2c11017f84276cb23d15843dcccae5b104f4
44
https://github.com/home-assistant/core.git
81
def supported_features(self) -> ClimateEntityFeature: features = self._supported_flags if HVACMode.HEAT_COOL in self.hvac_modes:
11
72
supported_features
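The `supported_features` property combines capability flags with bitwise OR. A small sketch of the same pattern with a standalone `IntFlag` enum; the flag names mirror, but are not, Home Assistant's ClimateEntityFeature:

from enum import IntFlag

class ClimateFeature(IntFlag):
    TARGET_TEMPERATURE = 1
    TARGET_TEMPERATURE_RANGE = 2
    FAN_MODE = 4

def supported_features(has_heat_cool, has_fan):
    features = ClimateFeature.TARGET_TEMPERATURE         # baseline capability
    if has_heat_cool:
        features |= ClimateFeature.TARGET_TEMPERATURE_RANGE
    if has_fan:
        features |= ClimateFeature.FAN_MODE
    return features

flags = supported_features(has_heat_cool=True, has_fan=False)
print(flags)                                  # TARGET_TEMPERATURE and TARGET_TEMPERATURE_RANGE set
print(ClimateFeature.FAN_MODE in flags)       # False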