Dataset schema (for string columns, min/max refer to string length):

Column           Type            Min    Max
n_words          int64           3      1.95k
n_ast_errors     int64           0      2
complexity       int64           1      151
nloc             int64           2      546
path             stringlengths   8      125
id               int64           280    339k
commit_message   stringlengths   3      18.1k
repo             stringlengths   3      28
ast_levels       int64           4      28
language         stringclasses   1 value (Python)
vocab_size       int64           3      677
file_name        stringlengths   5      67
code             stringlengths   101    24k
commit_id        stringlengths   40     40
ast_errors       stringlengths   0      2.76k
token_counts     int64           7      3.77k
url              stringlengths   31     61
n_whitespaces    int64           4      13.9k
random_cut       stringlengths   21     13.9k
n_identifiers    int64           1      157
n_ast_nodes      int64           10     3.6k
fun_name         stringlengths   3      72

Example rows follow, one field value per line, in the column order listed above
(the ast_errors line is omitted when that field is empty).
16
0
2
4
salt/modules/win_certutil.py
215,879
Fix win_certutil module to handle paths with spaces
salt
9
Python
15
win_certutil.py
def __virtual__():
    if salt.utils.platform.is_windows():
        return __virtualname__
    return False, "Module win_certutil: module only works on Windows systems."
53b3ebc92648c2081c58865713b50a2859ae8310
22
https://github.com/saltstack/salt.git
32
def __virtual__(): if salt.utils.platform.is_windows(): return __virtualname__
6
41
__virtual__
22
0
1
6
tests/sentry/api/endpoints/test_user_index.py
100,121
ref(tests): Remove `get_valid_response()` (#34822)
sentry
12
Python
14
test_user_index.py
def test_email_query(self):
    response = self.get_success_response(qs_params={"query": "email:bar@example.com"})
    assert len(response.data) == 1
    assert response.data[0]["id"] == str(self.superuser.id)

    response = self.get_success_response(qs_params={"query": "email:foobar"})
    assert len(response.data) == 0
096b5511e244eecd8799b2a0324655207ce8985e
70
https://github.com/getsentry/sentry.git
56
def test_email_query(self): response = self.get_success_response(qs_params={"query": "email:bar@example.com"}) assert len(response.data) == 1 assert response.data[0]["id"] == str(self.superuser.id) response = s
10
119
test_email_query
32
0
2
8
rotate_string.py
22,768
refactor: clean code Signed-off-by: slowy07 <slowy.arfy@gmail.com>
Python
11
Python
25
rotate_string.py
def circular_rotate(s):
    s = list(s)
    idx = 0
    mid = len(s) // 2
    for i in reversed(range(mid, len(s))):
        s[idx], s[i] = s[i], s[idx]
        idx += 1
    return s


s = "aditya"
print("".join(circular_rotate(s)))
f0af0c43340763724f139fa68aa1e5a9ffe458b4
61
https://github.com/geekcomputers/Python.git
58
def circular_rotate(s): s = list(s) idx = 0 mid = len(s) // 2 for i in reversed(range(mid, len(s))): s[idx], s[i] = s[i], s[
11
125
circular_rotate
14
1
1
4
tests/gamestonk_terminal/economy/test_economy_controller.py
282,025
Tests : Economy + Conftest (#1260) * Updating tests : economy * Updating tests : removing breaklines * Updating tests : economy * Updating tests : conftest * Updating tests : economy
OpenBBTerminal
9
Python
12
test_economy_controller.py
def test_switch(an_input, expected_queue):
    controller = economy_controller.EconomyController(queue=None)
    queue = controller.switch(an_input=an_input)

    assert queue == expected_queue


@pytest.mark.vcr(record_mode="none")
683a8bdd83c1b931df111a5b2b8b19350930b73a
@pytest.mark.vcr(record_mode="none")
31
https://github.com/OpenBB-finance/OpenBBTerminal.git
21
def test_switch(an_input, expected_queue):
12
69
test_switch
33
0
1
3
airflow/decorators/base.py
45,079
Implement mapped value unpacking (#21641)
airflow
8
Python
31
base.py
def get_serialized_fields(cls):
    # The magic super() doesn't work here, so we use the explicit form.
    # Not using super(..., cls) to work around pyupgrade bug.
    sup = super(DecoratedMappedOperator, DecoratedMappedOperator)
    return sup.get_serialized_fields() | {"mapped_op_kwargs"}
46a337c8cda6fcc515fffe9a4e4cc324edaefa0a
23
https://github.com/apache/airflow.git
60
def get_serialized_fields(cls): # The magic super() doesn't work here, so we use the explicit form. # Not using super(..., cls) to work around pyupgrade bug. sup = super(DecoratedMappedOperator, DecoratedMappedOperator) return sup
5
41
get_serialized_fields
75
1
2
21
dask/dataframe/io/tests/test_parquet.py
156,279
Warn users that `engine='auto'` will change in future (#8907) * Warn user that `engine='auto'` will change in future Adds a warning to `read_parquet` and `to_parquet` that the meaning of `engine='auto'` will change in the future (switching to using `pyarrow` if it is installed, and falling back to `fastparquet`). This warning is only raised for users that have both libraries installed and have `engine='auto'`. Users without both libraries installed or that already specify an engine will not see this warning. * Fixup failing test * Only warn if backend-specific options are passed We now warn if: - `engine='auto'` - Both `pyarrow` and `fastparquet` are installed - Backend specific options are provided
dask
15
Python
58
test_parquet.py
def test_optimize_and_not(tmpdir, engine):
    path = os.path.join(tmpdir, "path.parquet")
    df = pd.DataFrame(
        {"a": [3, 4, 2], "b": [1, 2, 4], "c": [5, 4, 2], "d": [1, 2, 3]},
        index=["a", "b", "c"],
    )
    df.to_parquet(path, engine=engine)
    df2 = dd.read_parquet(path, engine=engine)
    df2a = df2["a"].groupby(df2["c"]).first().to_delayed()
    df2b = df2["b"].groupby(df2["c"]).first().to_delayed()
    df2c = df2[["a", "b"]].rolling(2).max().to_delayed()
    df2d = df2.rolling(2).max().to_delayed()
    (result,) = dask.compute(df2a + df2b + df2c + df2d)
    expected = [
        dask.compute(df2a)[0][0],
        dask.compute(df2b)[0][0],
        dask.compute(df2c)[0][0],
        dask.compute(df2d)[0][0],
    ]
    for a, b in zip(result, expected):
        assert_eq(a, b)


@write_read_engines()
bfc76afdcdc43c575a10ffefda94aaba424fe347
@write_read_engines()
274
https://github.com/dask/dask.git
161
def test_optimize_and_not(tmpdir, engine): path = os.path.join(tmpd
32
437
test_optimize_and_not
49
0
5
19
mindsdb/interfaces/model/model_controller.py
118,276
keep del company_id
mindsdb
15
Python
43
model_controller.py
def delete_model_version(self, models):
    if len(models) == 0:
        raise Exception(f"Version to delete is not found")

    for model in models:
        model_record = get_model_record(
            name=model['NAME'],
            project_name=model['PROJECT'],
            version=model['VERSION']
        )
        if model_record.active:
            raise Exception(f"Can't remove active version: f{model['PROJECT']}.{model['NAME']}.{model['VERSION']}")

        is_cloud = self.config.get('cloud', False)
        if is_cloud:
            model_record.deleted_at = dt.datetime.now()
        else:
            db.session.delete(model_record)
        modelStorage = ModelStorage(model_record.id)
        modelStorage.delete()

    db.session.commit()
402b28c300dbca149db75439c358f811ea1d36ac
118
https://github.com/mindsdb/mindsdb.git
258
def delete_model_version(self, models): if len(models) == 0: raise Exception(f"Version to dele
26
224
delete_model_version
11
0
2
4
dashboard/modules/job/tests/test_job_manager.py
129,927
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
11
Python
11
test_job_manager.py
async def test_unknown_job(self, job_manager):
    with pytest.raises(RuntimeError, match="Job 'unknown' does not exist."):
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
31
https://github.com/ray-project/ray.git
17
async def test_unknown_job(self, job_manager): with pytest.raises(RuntimeError, match="Job 'unknown' does not exist."):
7
34
test_unknown_job
17
0
1
5
tests/ludwig/utils/test_dataframe_utils.py
8,288
Ensure no ghost ray instances are running in tests (#2607) * Add test fixtures to prevent double initialization * contain dataframe utils test in ray test fixture * add fixture for training determinism test * fix typo in docstring
ludwig
11
Python
16
test_dataframe_utils.py
def test_to_numpy_dataset_with_pandas_backend_mismatch(ray_cluster_2cpu):
    pd_df = pd.DataFrame([[1, 2, 3]], columns=["col1", "col2", "col3"])
    ray_backend = create_backend("ray")
    with pytest.raises(AttributeError):
        to_numpy_dataset(pd_df, backend=ray_backend)
0f509327aed9f9ec687543e1b851f66061603375
53
https://github.com/ludwig-ai/ludwig.git
32
def test_to_numpy_dataset_with_pandas_backend_mismatch(ray_cluster_2cpu): pd_df = pd.DataFrame([[
13
89
test_to_numpy_dataset_with_pandas_backend_mismatch
73
0
3
17
pandas/core/arrays/interval.py
164,211
REF: implement LossySetitemError (#45672)
pandas
15
Python
51
interval.py
def _validate_listlike(self, value):
    # list-like of intervals
    try:
        array = IntervalArray(value)
        self._check_closed_matches(array, name="value")
        value_left, value_right = array.left, array.right
    except TypeError as err:
        # wrong type: not interval or NA
        msg = f"'value' should be an interval type, got {type(value)} instead."
        raise TypeError(msg) from err

    try:
        self.left._validate_fill_value(value_left)
    except (LossySetitemError, TypeError) as err:
        msg = (
            "'value' should be a compatible interval type, "
            f"got {type(value)} instead."
        )
        raise TypeError(msg) from err

    return value_left, value_right
9a98aca4b57af277b7747e402029bd57088cba2c
89
https://github.com/pandas-dev/pandas.git
254
def _validate_listlike(self, value): # list-like of intervals try: array = Inter
17
166
_validate_listlike
59
1
2
18
saleor/payment/tests/test_utils.py
25,940
Refactor voucher and shipping amount for payment lines data
saleor
13
Python
49
test_utils.py
def test_create_refund_data_order_lines(order_with_lines, refund_shipping_costs):
    # given
    order_lines = order_with_lines.lines.all()
    order_refund_lines = [
        OrderLineInfo(line=(line := order_lines[0]), quantity=2, variant=line.variant),
        OrderLineInfo(line=(line := order_lines[1]), quantity=1, variant=line.variant),
    ]
    fulfillment_refund_lines = []

    # when
    refund_data = create_refund_data(
        order_with_lines,
        order_refund_lines,
        fulfillment_refund_lines,
        refund_shipping_costs,
    )

    # then
    assert refund_data.lines == {
        line.variant_id: line.quantity - refund_line.quantity
        for line, refund_line in zip(order_lines, order_refund_lines)
    }
    assert refund_data.shipping == refund_shipping_costs


@pytest.mark.parametrize("refund_shipping_costs", [True, False])
f365f77eb90579cb0dcccc24735f82e039b71f62
@pytest.mark.parametrize("refund_shipping_costs", [True, False])
120
https://github.com/saleor/saleor.git
149
def test_create_refund_data_order_lines(order_with_lines, refund_shipping_costs): # given order_lines = order_with_lines.lines.all() order_refund_lines = [ OrderLineInfo(line=(line := order_lines[0]), quantity=2, variant=
21
201
test_create_refund_data_order_lines
211
0
28
71
mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
113,967
cast column names on create predictor
mindsdb
17
Python
113
mysql_proxy.py
def answer_create_predictor(self, statement):
    struct = {
        'predictor_name': statement.name.parts[-1],
        'integration_name': statement.integration_name.parts[-1],
        'select': statement.query_str,
        'predict': [x.parts[-1] for x in statement.targets]
    }
    if len(struct['predict']) > 1:
        raise Exception("Only one field can be in 'PREDICT'")
    if statement.using is not None:
        struct['using'] = statement.using
    if statement.datasource_name is not None:
        struct['datasource_name'] = statement.datasource_name.parts[-1]
    if statement.order_by is not None:
        struct['order_by'] = [x.field.parts[-1] for x in statement.order_by]
        if len(struct['order_by']) > 1:
            raise Exception("Only one field can be in 'OPRDER BY'")
    if statement.group_by is not None:
        struct['group_by'] = [x.parts[-1] for x in statement.group_by]
    if statement.window is not None:
        struct['window'] = statement.window
    if statement.horizon is not None:
        struct['horizon'] = statement.horizon

    model_interface = self.session.model_interface
    data_store = self.session.data_store

    predictor_name = struct['predictor_name']
    integration_name = struct['integration_name']

    if integration_name.lower().startswith('datasource.'):
        ds_name = integration_name[integration_name.find('.') + 1:]
        ds = data_store.get_datasource_obj(ds_name, raw=True)
        ds_data = data_store.get_datasource(ds_name)
    else:
        if self.session.datasource_interface.get_db_integration(integration_name) is None:
            raise Exception(f"Unknown integration: {integration_name}")

        ds_name = struct.get('datasource_name')
        if ds_name is None:
            ds_name = data_store.get_vacant_name(predictor_name)

        ds = data_store.save_datasource(ds_name, integration_name, {'query': struct['select']})
        ds_data = data_store.get_datasource(ds_name)

    timeseries_settings = {}
    for w in ['order_by', 'group_by', 'window', 'horizon']:
        if w in struct:
            timeseries_settings[w] = struct.get(w)

    kwargs = struct.get('using', {})
    if len(timeseries_settings) > 0:
        if 'timeseries_settings' not in kwargs:
            kwargs['timeseries_settings'] = timeseries_settings
        else:
            if isinstance(kwargs.get('timeseries_settings'), str):
                kwargs['timeseries_settings'] = json.loads(kwargs['timeseries_settings'])
            kwargs['timeseries_settings'].update(timeseries_settings)

    ds_column_names = [x['name'] for x in ds_data['columns']]
    try:
        predict = self._check_predict_columns(struct['predict'], ds_column_names)
    except Exception:
        data_store.delete_datasource(ds_name)
        raise
4f36633784a0b0e8a043bbd71d5a1f2ae8f584ed
659
https://github.com/mindsdb/mindsdb.git
774
def answer_create_predictor(self, statement): struct = { 'predictor_name': statement.name.parts[-1], 'integration_name': statement.integration_name.parts[-1], 'select': statement.query_str, 'predict': [x.parts[-1] for x in statement.targets] } if len(struct['predict']) > 1: raise Exception("Only one field can be in 'PREDICT'") if statement.using is not None: struct['using'] = statement.using if statement.datasource_name is not None: struct['datasource_name'] = statement.datasource_name.parts[-1] if statement.order_by is not None: struct['order_by'] = [x.field.parts[-1] for x in statement.order_by] if len(struct['order_by']) > 1: raise Exception("Only one field can be in 'OPRDER BY'") if statemen
49
837
answer_create_predictor
15
0
1
5
tests/backends/sqlite/tests.py
201,790
Refs #33476 -- Reformatted code with Black.
django
4
Python
14
tests.py
def test_distinct_aggregation_multiple_args_no_distinct(self):
    # Aggregate functions accept multiple arguments when DISTINCT isn't
    # used, e.g. GROUP_CONCAT().
9c19aff7c7561e3a82978a272ecdaad40dda5c00
34
https://github.com/django/django.git
28
def test_distinct_aggregation_multiple_args_no_distinct(self): # Aggregate
2
11
test_distinct_aggregation_multiple_args_no_distinct
25
0
3
9
packages/syft/src/syft/core/node/common/node_service/tff/tff_service.py
2,049
fixed security tests
PySyft
13
Python
19
tff_service.py
def aux_recursive_od2d(dit):
    new_dict = {}
    for key in dit:
        if type(dit[key]) == collections.OrderedDict:
            new_elem = aux_recursive_od2d(dit[key])
            new_dict[key] = new_elem
        else:
            new_dict[key] = dit[key]
    return new_dict
5724f9c7db0a840c6c209d1d02d03c24ddf59d35
55
https://github.com/OpenMined/PySyft.git
80
def aux_recursive_od2d(dit): new_dict = {} for key in dit: if type(dit[key]) == collections.OrderedDict: new_elem = aux_recursive_od2d(dit[key]) new_dict[key] = new_elem
8
86
aux_recursive_od2d
331
0
8
70
python/ray/tune/examples/cifar10_pytorch.py
124,070
[air] update documentation to use `session.report` (#26051) Update documentation to use `session.report`. Next steps: 1. Update our internal caller to use `session.report`. Most importantly, CheckpointManager and DataParallelTrainer. 2. Update `get_trial_resources` to use PGF notions to incorporate the requirement of ResourceChangingScheduler. @Yard1 3. After 2 is done, change all `tune.get_trial_resources` to `session.get_trial_resources` 4. [internal implementation] remove special checkpoint handling logic from huggingface trainer. Optimize the flow for checkpoint conversion with `session.report`. Co-authored-by: Antoni Baum <antoni.baum@protonmail.com>
ray
18
Python
201
cifar10_pytorch.py
def train_cifar(config): net = Net(config["l1"], config["l2"]) device = "cpu" if torch.cuda.is_available(): device = "cuda:0" if torch.cuda.device_count() > 1: net = nn.DataParallel(net) net.to(device) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=config["lr"], momentum=0.9) # Load existing checkpoint through `session.get_checkpoint()` API. if session.get_checkpoint(): loaded_checkpoint = session.get_checkpoint() with loaded_checkpoint.as_directory() as loaded_checkpoint_dir: model_state, optimizer_state = torch.load(os.path.join(loaded_checkpoint_dir, "checkpoint.pt")) net.load_state_dict(model_state) optimizer.load_state_dict(optimizer_state) data_dir = os.path.abspath("./data") trainset, testset = load_data(data_dir) test_abs = int(len(trainset) * 0.8) train_subset, val_subset = random_split( trainset, [test_abs, len(trainset) - test_abs]) trainloader = torch.utils.data.DataLoader( train_subset, batch_size=int(config["batch_size"]), shuffle=True, num_workers=8) valloader = torch.utils.data.DataLoader( val_subset, batch_size=int(config["batch_size"]), shuffle=True, num_workers=8) for epoch in range(10): # loop over the dataset multiple times running_loss = 0.0 epoch_steps = 0 for i, data in enumerate(trainloader, 0): # get the inputs; data is a list of [inputs, labels] inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics running_loss += loss.item() epoch_steps += 1 if i % 2000 == 1999: # print every 2000 mini-batches print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / epoch_steps)) running_loss = 0.0 # Validation loss val_loss = 0.0 val_steps = 0 total = 0 correct = 0 for i, data in enumerate(valloader, 0): with torch.no_grad(): inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) outputs = net(inputs) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() loss = criterion(outputs, labels) val_loss += loss.cpu().numpy() val_steps += 1 # Here we save a checkpoint. It is automatically registered with # Ray Tune and will potentially be accessed through in ``session.get_checkpoint()`` # in future iterations. # Note to save a file like checkpoint, you still need to put it under a directory # to construct an AIR checkpoint. os.makedirs("my_model", exist_ok=True) # ok to overwrite the previous one. path = os.path.join("my_model", "checkpoint.pt") torch.save( (net.state_dict(), optimizer.state_dict()), path) checkpoint = Checkpoint.from_directory("my_model") session.report({"loss": (val_loss / val_steps), "accuracy": correct / total}, checkpoint=checkpoint) print("Finished Training") # __train_end__ # __test_acc_begin__
ac831fded416381ad3c7fe2ba135eaa4aaab5879
572
https://github.com/ray-project/ray.git
1,035
def train_cifar(config): net = Net(config["l1"], config["l2"]) device = "cpu" if torch.cuda.is_available(): device = "cuda:0" if torch.cuda.device_count() > 1: net = nn.DataParallel(net) net.to(device) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=config["lr"], momentum=0.9) # Load existing checkpoint through `session.get_checkpoint()` API. if session.get_checkpoint(): loaded_checkpoint = session.get_checkpoint() with loaded_checkpoint.as_directory() as loaded_checkpoint_dir: model_state, optimizer_state = torch.load(os.path.join(loaded_checkpoint_dir, "checkpoint.pt")) net.load_state_dict(model_state) optimizer.load_state_dict(optimizer_state) data_dir = os.path.abspath("./data") trainset, testset = load_data(data_dir) test_abs = int(len(trainset) * 0.8) train_subset, val_subset = random_split( trainset, [test_abs, len(trainset) - test_abs]) trainloader = torch.utils.data.DataLoader( train_subset, batch_size=int(config["batch_size"]), shuffle=True, num_workers=8) valloader = torch.utils.data.DataLoader( val_subset, batch_size=int(config["batch_size"]), shuffle=True, num_workers=8) for epoch in range(10): # loop over the dataset multiple times running_loss = 0.0 epoch_steps = 0 for i, data in enumerate(trainloader, 0): # get the inputs; data is a list of [inputs, labels] inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize
86
935
train_cifar
34
0
1
14
tests/pipelines/test_pipelines_common.py
32,004
feat: add pipeline registry abstraction (#17905) * feat: add pipeline registry abstraction - added `PipelineRegistry` abstraction - updates `add_new_pipeline.mdx` (english docs) to reflect the api addition - migrate `check_task` and `get_supported_tasks` from transformers/pipelines/__init__.py to transformers/pipelines/base.py#PipelineRegistry.{check_task,get_supported_tasks} Signed-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com> * fix: update with upstream/main chore: Apply suggestions from sgugger's code review Signed-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * chore: PR updates - revert src/transformers/dependency_versions_table.py from upstream/main - updates pipeline registry to use global variables Signed-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com> * tests: add tests for pipeline registry Signed-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com> * tests: add test for output warning. Signed-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com> * chore: fmt and cleanup unused imports Signed-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com> * fix: change imports to top of the file and address comments Signed-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
transformers
13
Python
32
test_pipelines_common.py
def test_register_pipeline(self):
    custom_text_classification = {
        "impl": CustomPipeline,
        "tf": (),
        "pt": (AutoModelForSequenceClassification,),
        "default": {"model": {"pt": "hf-internal-testing/tiny-random-distilbert"}},
        "type": "text",
    }
    PIPELINE_REGISTRY.register_pipeline("custom-text-classification", custom_text_classification)
    assert "custom-text-classification" in PIPELINE_REGISTRY.get_supported_tasks()

    task_def, _ = PIPELINE_REGISTRY.check_task("custom-text-classification")
    self.assertEqual(task_def, custom_text_classification)
    self.assertEqual(task_def["type"], "text")
    self.assertEqual(task_def["impl"], CustomPipeline)
49cd736a288a315d741e5c337790effa4c9fa689
97
https://github.com/huggingface/transformers.git
144
def test_register_pipeline(self): custom_text_classification = { "impl": CustomPipeline, "tf": (), "pt": (AutoModelForSequenceClassification,), "default": {"model": {"pt": "hf-internal-testing/tiny-random-d
12
174
test_register_pipeline
91
0
2
26
airbyte-integrations/connectors/source-chargebee/source_chargebee/source.py
4,089
:tada: Source Chargebee: add credit note model (#10795) * feat(chargebee) add credit note model * fix(airbyte): update version Dockerfile * fix(airbyte): update version Dockerfile v2
airbyte
12
Python
55
source.py
def streams(self, config) -> List[Stream]:
    # Configure the Chargebee Python SDK
    chargebee.configure(api_key=config["site_api_key"], site=config["site"])
    kwargs = {"start_date": config["start_date"]}
    product_catalog_version = config["product_catalog"]
    # Below streams are suitable for both `Product Catalog 1.0` and `Product Catalog 2.0`.
    common_streams = [
        Coupon(**kwargs),
        CreditNote(**kwargs),
        Customer(**kwargs),
        Event(**kwargs),
        Invoice(**kwargs),
        Order(**kwargs),
        Subscription(**kwargs),
        Transaction(**kwargs),
    ]
    if product_catalog_version == "1.0":
        # Below streams are suitable only for `Product Catalog 1.0`.
        product_catalog_v1_streams = [
            Addon(**kwargs),
            Plan(**kwargs),
        ]
        return common_streams + product_catalog_v1_streams
    # Below streams are suitable only for `Product Catalog 2.0`.
    product_catalog_v2_streams = [
        Item(**kwargs),
        ItemPrice(**kwargs),
        AttachedItem(**kwargs),
    ]
    return common_streams + product_catalog_v2_streams
16dfae0df7290392e5f0754731ae53ae7b7f939d
149
https://github.com/airbytehq/airbyte.git
369
def streams(self, config) -> List[Stream]: # Configure the Chargebee Python SDK chargebee.configure(api_key=config["site_api_key"], site=config["site"]) kwargs = {"start_date": config["start_date"]} product_catalog_version = config["product_catalog"] # Below streams are suitable for both `Product Catalog 1.0` and `Product Catalog 2.0`. common_streams = [ Coupon(**kwargs), CreditNote(**kwargs), Customer(**kwargs), Event(**kwargs), Invoice(**kwargs), Order(**kwargs), Subscription(**kwargs), Transaction(**kwargs), ] if product_catalog_version == "1.0": # Below streams are suitable only for `Product Catalog 1.0`. product_catalog_v1_s
27
242
streams
35
0
1
8
saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscriptions_payments.py
28,352
GraphQL subscription support for synchronous webhook events (#9763) * WIP add sync webhooks subscription payload handling * add tests, fix minor things * update schema * remove unneeded code * add fix for circular field resolve * fix-filter-shipping-methods-payload * added_in added to desription * add missing types * revert refactor, precommit issues * fixes after review * cosmetix fixes post-review * subscription types description fixes * remove unneeded description from PaymentBase * add validation for creating webhook with two top level fields, add tests for shippingListMethodsForCheckout * add docstring, refactor prevent_sync_event_circular_wuery wrapper * fix docstring of revent_sync_event_circular_query * fix linters
saleor
11
Python
25
test_create_deliveries_for_subscriptions_payments.py
def test_payment_confirm(payment, subscription_payment_confirm_webhook):
    # given
    webhooks = [subscription_payment_confirm_webhook]
    event_type = WebhookEventSyncType.PAYMENT_CONFIRM

    # when
    deliveries = create_deliveries_for_subscriptions(event_type, payment, webhooks)

    # then
    expected_payload = generate_payment_payload(payment)
    assert json.loads(deliveries[0].payload.payload) == expected_payload
    assert len(deliveries) == len(webhooks)
    assert deliveries[0].webhook == webhooks[0]
8201efcde2d7aacccf3512c544cceea6780a0598
71
https://github.com/saleor/saleor.git
64
def test_payment_confirm(payment, subscription_payment_confirm_webhook): # given webhooks = [subscription_payment_confirm_webhoo
16
111
test_payment_confirm
67
0
1
34
tests/pytests/unit/state/test_state_compiler.py
215,070
Fix test cases with PermissionError on /var/cache/salt When running the test cases without root permission, some test cases fail: ``` $ python3 -m pytest -ra tests/pytests/unit/state/test_state_compiler.py tests/pytests/unit/test_minion.py [...] FAILED tests/pytests/unit/state/test_state_compiler.py::test_render_requisite_require_disabled - PermissionError: [Errno 13] Permission denied: '/var/cache/salt' FAILED tests/pytests/unit/state/test_state_compiler.py::test_render_requisite_require_in_disabled - PermissionError: [Errno 13] Permission denied: '/var/cache/salt' FAILED tests/pytests/unit/test_minion.py::test_minion_module_refresh - PermissionError: [Errno 13] Permission denied: '/var/cache/salt' FAILED tests/pytests/unit/test_minion.py::test_minion_module_refresh_beacons_refresh - PermissionError: [Errno 13] Permission denied: '/var/cache/salt' ``` Fix these test cases by using a temporary directory as cache directory. Signed-off-by: Benjamin Drung <benjamin.drung@ionos.com>
salt
25
Python
53
test_state_compiler.py
def test_render_requisite_require_disabled(tmp_path):
    with patch("salt.state.State._gather_pillar") as state_patch:
        high_data = {
            "step_one": OrderedDict(
                [
                    (
                        "test",
                        [
                            OrderedDict(
                                [("require", [OrderedDict([("test", "step_two")])])]
                            ),
                            "succeed_with_changes",
                            {"order": 10000},
                        ],
                    ),
                    ("__sls__", "test.disable_require"),
                    ("__env__", "base"),
                ]
            ),
            "step_two": {
                "test": ["succeed_with_changes", {"order": 10001}],
                "__env__": "base",
                "__sls__": "test.disable_require",
            },
        }

        minion_opts = salt.config.DEFAULT_MINION_OPTS.copy()
        minion_opts["cachedir"] = str(tmp_path)
        minion_opts["disabled_requisites"] = ["require"]
        state_obj = salt.state.State(minion_opts)
        ret = state_obj.call_high(high_data)
        run_num = ret["test_|-step_one_|-step_one_|-succeed_with_changes"][
            "__run_num__"
        ]
        assert run_num == 0
fae21e4698d9bb45a407345e7dff5ce3b69f799d
160
https://github.com/saltstack/salt.git
557
def test_render_requisite_require_disabled(tmp_path): with patch("salt.state.State._gather_pillar") as state_patch: high_data = { "step_one": OrderedDict( [ ( "test", [ OrderedDict( [("require", [OrderedDict([("test", "step_two")])])] ), "succeed_w
18
288
test_render_requisite_require_disabled
6
0
1
2
django/core/checks/security/sessions.py
204,451
Refs #33476 -- Reformatted code with Black.
django
7
Python
6
sessions.py
def _session_middleware():
    return "django.contrib.sessions.middleware.SessionMiddleware" in settings.MIDDLEWARE
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
https://github.com/django/django.git
8
def _session_middleware(): return "d
3
19
_session_middleware
16
0
2
14
mindsdb/integrations/handlers/lightwood_handler/tests/test_lightwood_handler.py
115,854
TS tests pass
mindsdb
13
Python
15
test_lightwood_handler.py
def test_13_train_ts_predictor_no_gby_hor1(self):
    query = f
    if self.test_model_2 not in self.handler.get_tables().data_frame.values:
        self.handler.native_query(query)
    else:
        self.handler.native_query(f"DROP PREDICTOR {self.test_model_2}")
        self.handler.native_query(query)
74977c69effc89a56080357449d5d337988daab7
54
https://github.com/mindsdb/mindsdb.git
69
def test_13_train_ts_predictor_no_gby_hor1(self): query = f if self.test_
11
114
test_13_train_ts_predictor_no_gby_hor1
130
0
4
29
pandas/core/exchange/column.py
166,205
ENH: Implement DataFrame interchange protocol (#46141)
pandas
16
Python
97
column.py
def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]:
    if self.dtype[0] == DtypeKind.STRING:
        # For each string, we need to manually determine the next offset
        values = self._col.to_numpy()
        ptr = 0
        offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64)
        for i, v in enumerate(values):
            # For missing values (in this case, `np.nan` values)
            # we don't increment the pointer
            if isinstance(v, str):
                b = v.encode(encoding="utf-8")
                ptr += len(b)

            offsets[i + 1] = ptr

        # Convert the offsets to a Pandas "buffer" using
        # the NumPy array as the backing store
        buffer = PandasBuffer(offsets)

        # Assemble the buffer dtype info
        dtype = (
            DtypeKind.INT,
            64,
            ArrowCTypes.INT64,
            Endianness.NATIVE,
        )  # note: currently only support native endianness
    else:
        raise NoBufferPresent(
            "This column has a fixed-length dtype so "
            "it does not have an offsets buffer"
        )

    return buffer, dtype
90140f055892a46f473bd26affab88a7f171e394
139
https://github.com/pandas-dev/pandas.git
490
def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]: if self.dtype[0] == DtypeKind.STRING: # For each string, we need to manually determine the next offset values = self._col.to_numpy() ptr = 0 offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64) for i, v in enumerate(values): # For missing values (in this case, `np.nan` values) # we don't increment the pointer if isinstance(v, str): b = v.encode(encoding="utf-8") ptr += len(b) offsets[i + 1] = ptr # Convert the offsets to a Pandas "buffer" using # the NumPy array as the backing store buffer = PandasBuffer(offsets) # Assemble the buffer dtype info dtype = ( DtypeKind.INT, 64, ArrowCTypes.INT64, Endianness.NATIVE, ) # note: currently only support native endianness else: raise NoBufferPresent( "This column has a fixed-length dtype so " "it does not have an offsets buffer" ) return buffer, dtype
33
228
_get_offsets_buffer
35
1
3
11
rllib/evaluate.py
137,777
[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369)
ray
13
Python
22
evaluate.py
def append_step(self, obs, action, next_obs, reward, terminated, truncated, info):
    if self._outfile:
        if self._save_info:
            self._current_rollout.append(
                [obs, action, next_obs, reward, terminated, truncated, info]
            )
        else:
            self._current_rollout.append(
                [obs, action, next_obs, reward, terminated, truncated]
            )
    self._total_steps += 1


@eval_app.command()
8e680c483ce326cefc62e44f68ab1a6948b1c3d2
@eval_app.command()
79
https://github.com/ray-project/ray.git
175
def append_step(self, obs, action, next_obs, reward, terminated, truncated, info): if self._outfile: if self._save_info: self._current_rollout.append( [obs, action, next_obs, reward, terminated, truncated, info] ) else: self._cur
16
120
append_step
12
0
1
2
django/contrib/gis/gdal/libgdal.py
203,991
Refs #33476 -- Reformatted code with Black.
django
8
Python
11
libgdal.py
def err_handler(error_class, error_number, message):
    logger.error("GDAL_ERROR %d: %s", error_number, message)


err_handler = CPLErrorHandler(err_handler)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
19
https://github.com/django/django.git
13
def err_handler(error_class, error_number, message): logger.error("GDAL_ERROR %d: %s", error_number, messa
7
39
err_handler
13
0
2
2
rllib/agents/pg/pg.py
133,783
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
10
Python
13
pg.py
def get_default_policy_class(self, config) -> Type[Policy]:
    return PGTorchPolicy if config.get("framework") == "torch" else PGTFPolicy
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
25
https://github.com/ray-project/ray.git
19
def get_default_policy_class(self, config) -> Type[Policy]: return PGTorchPolicy if config.get("framework") == "torch" else PGTFPolicy
8
41
get_default_policy_class
58
0
4
36
jina/orchestrate/flow/base.py
11,806
fix: properly disable graphql endpoint by default (#4471)
jina
14
Python
38
base.py
def _get_address_table(self, address_table):
    address_table.add_row('🔗', 'Protocol: ', f'{self.protocol}')
    address_table.add_row(
        '🏠',
        'Local access: ',
        f'[underline]{self.host}:{self.port}[/underline]',
    )
    address_table.add_row(
        '🔒',
        'Private network: ',
        f'[underline]{self.address_private}:{self.port}[/underline]',
    )
    if self.address_public:
        address_table.add_row(
            '🌐',
            'Public address: ',
            f'[underline]{self.address_public}:{self.port}[/underline]',
        )
    if self.protocol == GatewayProtocolType.HTTP:
        address_table.add_row(
            '💬',
            'Swagger UI: ',
            f'[underline]http://localhost:{self.port}/docs[/underline]',
        )
        address_table.add_row(
            '📚',
            'Redoc: ',
            f'[underline]http://localhost:{self.port}/redoc[/underline]',
        )
        if self.args.expose_graphql_endpoint:
            address_table.add_row(
                '💬',
                'GraphQL UI: ',
                f'[underline][cyan]http://localhost:{self.port}/graphql[/underline][/cyan]',
            )
    return address_table
9bdefb0a693c4556e0902ef913f4426e3ef5ff65
113
https://github.com/jina-ai/jina.git
478
def _get_address_table(self, address_table): address_table.add_row('🔗', 'Protocol: ', f'{self.protocol}') address_table.add_row( '🏠', 'Local access: ', f'[underline]{self.host}:{self.port}[/underline]', ) address_table.add_row( '🔒', 'Private network: ', f'[underline]{self.address_private}:{self.port}[/underline]', ) if self.address_public: address_table.add_row( '🌐', 'Public address: ', f'[underline]{self.address_public}:{self.port}[/underline]', ) if self.protocol == GatewayProtocolType.HTTP: address_table.add_row( '💬', 'Swagger UI: ', f'[underline]http://localhost:{self.port}/docs[/underline]', ) address_table.add_row( '📚', 'Redoc: ', f'[underline]http://localhost:{self.port}/redoc[/underline]', ) if self.args.expose_graphql_e
13
263
_get_address_table
47
0
4
10
test/test_prototype_builtin_datasets.py
191,954
refactor prototype datasets tests (#5136) * refactor prototype datasets tests * skip tests with insufficient third party dependencies
vision
13
Python
40
test_prototype_builtin_datasets.py
def test_sample(self, dataset_mock, config):
    dataset, _ = dataset_mock.load(config)

    try:
        sample = next(iter(dataset))
    except Exception as error:
        raise AssertionError("Drawing a sample raised the error above.") from error

    if not isinstance(sample, dict):
        raise AssertionError(f"Samples should be dictionaries, but got {type(sample)} instead.")

    if not sample:
        raise AssertionError("Sample dictionary is empty.")
3e79d149a16e9fc9b9b6747609615cf8f8607346
66
https://github.com/pytorch/vision.git
125
def test_sample(self, dataset_mock, config): dataset, _ = dataset_mock.load(config) try: sample = next(iter(dataset)) except Exception as error: raise AssertionError("Drawing a sample raised the error above.") from error if not isinstance(sample, dict):
16
119
test_sample
20
0
1
7
tests/components/http/test_init.py
292,279
Startup with an emergency self signed cert if the ssl certificate cannot be loaded (#66707)
core
9
Python
15
test_init.py
def _setup_broken_ssl_pem_files(tmpdir):
    test_dir = tmpdir.mkdir("test_broken_ssl")
    cert_path = pathlib.Path(test_dir) / "cert.pem"
    cert_path.write_text("garbage")
    key_path = pathlib.Path(test_dir) / "key.pem"
    key_path.write_text("garbage")
    return cert_path, key_path
3bf2be1765f7a33fbce06cbabeb2e2115f2f07c7
49
https://github.com/home-assistant/core.git
37
def _setup_broken_ssl_pem_files(tmpdir): test_dir = tmpdir.mkdir("test_broken_ssl") cert_path = pathlib.Path(test_dir) / "cert.pem" cert_path.write_text("garbage") key_path = pathlib.Path(test_dir) / "key.pem" key_path.write_text("garbage") return cert_path, key_path
9
90
_setup_broken_ssl_pem_files
16
0
2
12
src/prefect/client/orion.py
59,097
Improve client stability (#7090)
prefect
11
Python
15
orion.py
async def api_healthcheck(self) -> Optional[Exception]:
    try:
        await self._client.get("/health")
        return None
    except Exception as exc:
        return exc
852644aa77ce2a6377e070ed0182859b0fdd0b98
31
https://github.com/PrefectHQ/prefect.git
70
async def api_healthcheck(self) -> Optional[Exception]: try: await self._client.get("/health") return None except Exception as exc: return exc
7
57
api_healthcheck
29
0
2
6
ludwig/backend/ray.py
7,477
Respect the resource requests in RayPredictor (#2359) * fix ordering to respect the passed down resources * clean up
ludwig
11
Python
24
ray.py
def get_resources_per_worker(self) -> Tuple[int, int]:
    trainer_kwargs = self.get_trainer_kwargs()
    resources_per_worker = trainer_kwargs.get("resources_per_worker", {})
    num_gpus = resources_per_worker.get("GPU", 0)
    num_cpus = resources_per_worker.get("CPU", (1 if num_gpus == 0 else 0))
    return num_cpus, num_gpus
15961c07d8a0d7fc25c5b7395b11f191eab840bd
62
https://github.com/ludwig-ai/ludwig.git
63
def get_resources_per_worker(self) -> Tuple[int, int]: trainer_kwargs = self.get_trainer_kwargs() resources_per_worker = trainer_kwargs.get("resources_per_worker", {}) num_gpus = resources_per_worker.get("GPU", 0) num_cpus = resources_per_worker.get("CPU", (1 if num_g
10
100
get_resources_per_worker
288
0
9
130
bots/stocks/technical_analysis/aroon.py
282,658
Refactor Bot (#1326) * First commit * Renamed to bot * change from discordbot to bots imports * refractor cmds + autocomplete helpers + updated dps plots/candle + quote cmd * isort fix * Update helpers.py * Added first refactor * Added commands and refactor * Added description handler * Through grains * ta + image border helper * Added indices * merged * Finished econ * options refactor * dark_pool_shorts, candle, quote done * pagination/dropdown logic to helpers + fixes * dd/disc/insider done + add image logic to helpers * linting * removed plt style * Almost done with Gov * fixing conflicts * Finished gov (excpect ones mentioned) * screener done * Test bug refactor * Store changes to switch * Finished refactor * Fixed error * Delete ::40 * Fixed issue * Fixed black * finished * fixes Co-authored-by: teh_coderer <me@tehcoderer.com> Co-authored-by: didierlopes.eth <dro.lopes@campus.fct.unl.pt>
OpenBBTerminal
14
Python
178
aroon.py
def aroon_command(ticker="", length="25", scalar="100", start="", end=""): # Debug if cfg.DEBUG: logger.debug( "ta-aroon %s %s %s %s %s", ticker, length, scalar, start, end, ) # Check for argument if ticker == "": raise Exception("Stock ticker is required") if start == "": start = datetime.now() - timedelta(days=365) else: start = datetime.strptime(start, cfg.DATE_FORMAT) if end == "": end = datetime.now() else: end = datetime.strptime(end, cfg.DATE_FORMAT) if not length.lstrip("-").isnumeric(): raise Exception("Number has to be an integer") length = int(length) if not scalar.lstrip("-").isnumeric(): raise Exception("Number has to be an integer") scalar = float(scalar) ticker = ticker.upper() df_stock = helpers.load(ticker, start) if df_stock.empty: raise Exception("Stock ticker is invalid") # Retrieve Data df_stock = df_stock.loc[(df_stock.index >= start) & (df_stock.index < end)] df_ta = trend_indicators_model.aroon( df_stock["High"], df_stock["Low"], length, scalar ) # Output Data aadown = df_ta.columns[0].replace("_", " ") aaup = df_ta.columns[1].replace("_", " ") aaosc = df_ta.columns[2].replace("_", " ") fig = make_subplots( rows=3, cols=1, shared_xaxes=True, vertical_spacing=0.07, row_width=[0.2, 0.2, 0.2], ) fig.add_trace( go.Scatter( name=ticker, x=df_stock.index, y=df_stock["Adj Close"].values, line=dict(color="#fdc708", width=2), opacity=1, showlegend=False, ), row=1, col=1, ) fig.add_trace( go.Scatter( name=f"Aroon DOWN ({aadown})", x=df_ta.index, y=df_ta.iloc[:, 0].values, opacity=1, ), row=2, col=1, ) fig.add_trace( go.Scatter( name=f"Aroon UP ({aaup})", x=df_ta.index, y=df_ta.iloc[:, 1].values, opacity=1, ), row=2, col=1, ) fig.add_trace( go.Scatter( name=f"Aroon OSC ({aaosc})", x=df_ta.index, y=df_ta.iloc[:, 2].values, opacity=1, ), row=3, col=1, ) fig.add_hline( y=50, fillcolor="grey", opacity=1, layer="below", line_width=3, line=dict(color="grey", dash="dash"), row=2, col=1, ) fig.update_layout( margin=dict(l=0, r=20, t=30, b=20), template=cfg.PLT_TA_STYLE_TEMPLATE, colorway=cfg.PLT_TA_COLORWAY, title=f"Aroon on {ticker}", title_x=0.5, yaxis_title="Stock Price ($)", yaxis=dict( fixedrange=False, ), xaxis=dict( rangeslider=dict(visible=False), type="date", ), dragmode="pan", legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01), ) config = dict({"scrollZoom": True}) imagefile = "ta_aroon.png" # Check if interactive settings are enabled plt_link = "" if cfg.INTERACTIVE: html_ran = helpers.uuid_get() fig.write_html(f"in/aroon_{html_ran}.html", config=config) plt_link = f"[Interactive]({cfg.INTERACTIVE_URL}/aroon_{html_ran}.html)" fig.update_layout( width=800, height=500, ) imagefile = helpers.image_border(imagefile, fig=fig) return { "title": f"Stocks: Aroon-Indicator {ticker}", "description": plt_link, "imagefile": imagefile, }
fae93c67adc9015c1466712f9c8ffa35a8b70872
757
https://github.com/OpenBB-finance/OpenBBTerminal.git
1,149
def aroon_command(ticker="", length="25", scalar="100", start="", end=""): # Debug if cfg.DEBUG: logger.debug( "ta-aroon %s %s %s %s %s", ticker, length, scalar, start, end, ) # Check for argument if ticker == "": raise Exception("Stock ticker is required") if start == "": start = datetime.now() - timedelta(days=365) else: start = datetime.strptime(start, cfg.DATE_FORMAT) if end == "": end = datetime.now() else: end = datetime.strptime(end, cfg.DATE_FORMAT) if not length.lstrip("-").isnumeric(): raise Excep
97
1,200
aroon_command
19
0
3
9
python/ray/serve/config.py
145,616
[Serve] Add test for controller managing Java Replica (#22628)
ray
13
Python
16
config.py
def to_proto_bytes(self):
    data = self.dict()
    if data.get("user_config"):
        data["user_config"] = pickle.dumps(data["user_config"])
    if data.get("autoscaling_config"):
        data["autoscaling_config"] = AutoscalingConfigProto(
            **data["autoscaling_config"]
        )
    return DeploymentConfigProto(**data).SerializeToString()
0bab8dbfe0b5a58a06dd226950bdd70b0b186655
65
https://github.com/ray-project/ray.git
94
def to_proto_bytes(self): data = self.dict() if data.get("user_config"): data["user_conf
10
117
to_proto_bytes
16
0
2
7
homeassistant/components/plex/media_player.py
306,988
Use new media player enums [p] (#78058)
core
11
Python
14
media_player.py
def session(self, session):
    self._session = session
    if session:
        self.session_device = self.session.player
        self.update_state(self.session.state)
    else:
        self._attr_state = MediaPlayerState.IDLE
52b5e1779f1ed6e5005dc0bdff4137040d7216fb
43
https://github.com/home-assistant/core.git
69
def session(self, session): self._session = session if session: self.session_device = self.session
10
69
session
146
0
10
21
python3.10.4/Lib/difflib.py
222,505
add python 3.10.4 for windows
XX-Net
15
Python
76
difflib.py
def get_grouped_opcodes(self, n=3):
    codes = self.get_opcodes()
    if not codes:
        codes = [("equal", 0, 1, 0, 1)]
    # Fixup leading and trailing groups if they show no changes.
    if codes[0][0] == 'equal':
        tag, i1, i2, j1, j2 = codes[0]
        codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
    if codes[-1][0] == 'equal':
        tag, i1, i2, j1, j2 = codes[-1]
        codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)

    nn = n + n
    group = []
    for tag, i1, i2, j1, j2 in codes:
        # End the current group and start a new one whenever
        # there is a large range with no changes.
        if tag == 'equal' and i2-i1 > nn:
            group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
            yield group
            group = []
            i1, j1 = max(i1, i2-n), max(j1, j2-n)
        group.append((tag, i1, i2, j1, j2))
    if group and not (len(group)==1 and group[0][0] == 'equal'):
        yield group
8198943edd73a363c266633e1aa5b2a9e9c9f526
276
https://github.com/XX-net/XX-Net.git
386
def get_grouped_opcodes(self, n=3): codes = self.get_opcodes() if not codes: codes = [("equal", 0, 1, 0, 1)] # Fixup leading and trailing groups if they show no changes. if codes[0][0] == 'equal': tag, i1, i2, j1, j2 = codes[0] codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2 if codes[-1][0] == 'equal': tag, i1, i2, j1, j2 = codes[-1] codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n) nn = n + n group = [] for tag, i1, i2, j1, j2 in codes: # End the current group and start a new one whenever # there is a large range with no changes. if tag == 'equal' and i2-i1 > nn: group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n))) yield group group = [] i1, j1 = max(i1, i2-n), max(j1, j2-n)
16
410
get_grouped_opcodes
25
0
3
5
wagtail/admin/forms/pages.py
70,522
Drop support for Django <3.2
wagtail
10
Python
21
pages.py
def is_valid(self):
    comments = self.formsets.get('comments')
    # Remove the comments formset if the management form is invalid
    if comments and not comments.management_form.is_valid():
        del self.formsets['comments']
    return super().is_valid()
d6d43338efdeb85395918d106a1cb3f187ab6fa7
42
https://github.com/wagtail/wagtail.git
63
def is_valid(self): comments = self.formsets.get('comments') # Remove the comments formset if the management form is invalid if comments and not comments.management_form.is_valid
7
74
is_valid
12
0
8
43
tests/unit/hubble/test_hubio.py
12,109
feat(hub): add --no-cache option to "jina hub push" cli (#4594)
jina
8
Python
12
test_hubio.py
def test_push(mocker, monkeypatch, path, mode, tmpdir, force, tag, no_cache):
    mock = mocker.Mock()
fab9f0736c8d99558d93020cb3f27108627218f1
313
https://github.com/jina-ai/jina.git
14
def test_push(mocker, monkeypatch, path, mode, tmpdir, force, tag, no_cache): mock = mocker.Mock()
11
35
test_push
643
0
66
164
label_studio/data_manager/managers.py
177,832
fix: DEV-2214: Use contain instead of icontain in annotation result (#2308) * fix: Annotation result performance with contain * Back * Works! * Some * Try to fix completed at performance * Fix tests * Add merge migration
label-studio
24
Python
267
managers.py
def apply_filters(queryset, filters, project): if not filters: return queryset # convert conjunction to orm statement filter_expressions = [] for _filter in filters.items: # we can also have annotations filters if not _filter.filter.startswith("filter:tasks:") or _filter.value is None: continue # django orm loop expression attached to column name preprocess_field_name = load_func(settings.PREPROCESS_FIELD_NAME) field_name, _ = preprocess_field_name(_filter.filter, project.only_undefined_field) # filter preprocessing, value type conversion, etc.. preprocess_filter = load_func(settings.DATA_MANAGER_PREPROCESS_FILTER) _filter = preprocess_filter(_filter, field_name) # custom expressions for enterprise custom_filter_expressions = load_func(settings.DATA_MANAGER_CUSTOM_FILTER_EXPRESSIONS) filter_expression = custom_filter_expressions(_filter, field_name) if filter_expression: filter_expressions.append(filter_expression) continue # annotators if field_name == 'annotators' and _filter.operator == Operator.CONTAINS: filter_expressions.append(Q(annotations__completed_by=int(_filter.value))) continue elif field_name == 'annotators' and _filter.operator == Operator.NOT_CONTAINS: filter_expressions.append(~Q(annotations__completed_by=int(_filter.value))) continue elif field_name == 'annotators' and _filter.operator == Operator.EMPTY: value = cast_bool_from_str(_filter.value) filter_expressions.append(Q(annotations__completed_by__isnull=value)) continue # annotations results & predictions results if field_name in ['annotations_results', 'predictions_results']: result = add_result_filter(field_name, _filter, filter_expressions, project) if result == 'exit': return queryset.none() elif result == 'continue': continue # annotation ids if field_name == 'annotations_ids': field_name = 'annotations__id' if 'contains' in _filter.operator: # convert string like "1 2,3" => [1,2,3] _filter.value = [int(value) for value in re.split(',|;| ', _filter.value) if value and value.isdigit()] _filter.operator = 'in_list' if _filter.operator == 'contains' else 'not_in_list' elif 'equal' in _filter.operator: if not _filter.value.isdigit(): _filter.value = 0 # annotators if field_name == 'annotators' and _filter.operator == Operator.CONTAINS: filter_expressions.append(Q(annotations__completed_by=int(_filter.value))) continue elif field_name == 'annotators' and _filter.operator == Operator.NOT_CONTAINS: filter_expressions.append(~Q(annotations__completed_by=int(_filter.value))) continue elif field_name == 'annotators' and _filter.operator == Operator.EMPTY: value = cast_bool_from_str(_filter.value) filter_expressions.append(Q(annotations__completed_by__isnull=value)) continue # predictions model versions if field_name == 'predictions_model_versions' and _filter.operator == Operator.CONTAINS: q = Q() for value in _filter.value: q |= Q(predictions__model_version__contains=value) filter_expressions.append(q) continue elif field_name == 'predictions_model_versions' and _filter.operator == Operator.NOT_CONTAINS: q = Q() for value in _filter.value: q &= ~Q(predictions__model_version__contains=value) filter_expressions.append(q) continue elif field_name == 'predictions_model_versions' and _filter.operator == Operator.EMPTY: value = cast_bool_from_str(_filter.value) filter_expressions.append(Q(predictions__model_version__isnull=value)) continue # use other name because of model names conflict if field_name == 'file_upload': field_name = 'file_upload_field' # annotate with cast to number if need if _filter.type == 'Number' and 
field_name.startswith('data__'): json_field = field_name.replace('data__', '') queryset = queryset.annotate(**{ f'filter_{json_field.replace("$undefined$", "undefined")}': Cast(KeyTextTransform(json_field, 'data'), output_field=FloatField()) }) clean_field_name = f'filter_{json_field.replace("$undefined$", "undefined")}' else: clean_field_name = field_name # special case: predictions, annotations, cancelled --- for them 0 is equal to is_empty=True if clean_field_name in ('total_predictions', 'total_annotations', 'cancelled_annotations') and \ _filter.operator == 'empty': _filter.operator = 'equal' if cast_bool_from_str(_filter.value) else 'not_equal' _filter.value = 0 # get type of annotated field value_type = 'str' if queryset.exists(): value_type = type(queryset.values_list(field_name, flat=True)[0]).__name__ if (value_type == 'list' or value_type == 'tuple') and 'equal' in _filter.operator: raise Exception('Not supported filter type') # special case: for strings empty is "" or null=True if _filter.type in ('String', 'Unknown') and _filter.operator == 'empty': value = cast_bool_from_str(_filter.value) if value: # empty = true q = Q( Q(**{field_name: None}) | Q(**{field_name+'__isnull': True}) ) if value_type == 'str': q |= Q(**{field_name: ''}) if value_type == 'list': q = Q(**{field_name: [None]}) else: # empty = false q = Q( ~Q(**{field_name: None}) & ~Q(**{field_name+'__isnull': True}) ) if value_type == 'str': q &= ~Q(**{field_name: ''}) if value_type == 'list': q = ~Q(**{field_name: [None]}) filter_expressions.append(q) continue # regex pattern check elif _filter.operator == 'regex': try: re.compile(pattern=str(_filter.value)) except Exception as e: logger.info('Incorrect regex for filter: %s: %s', _filter.value, str(e)) return queryset.none() # append operator field_name = f"{clean_field_name}{operators.get(_filter.operator, '')}" # in if _filter.operator == "in": cast_value(_filter) filter_expressions.append( Q( **{ f"{field_name}__gte": _filter.value.min, f"{field_name}__lte": _filter.value.max, } ), ) # not in elif _filter.operator == "not_in": cast_value(_filter) filter_expressions.append( ~Q( **{ f"{field_name}__gte": _filter.value.min, f"{field_name}__lte": _filter.value.max, } ), ) # in list elif _filter.operator == "in_list": filter_expressions.append( Q(**{f"{field_name}__in": _filter.value}), ) # not in list elif _filter.operator == "not_in_list": filter_expressions.append( ~Q(**{f"{field_name}__in": _filter.value}), ) # empty elif _filter.operator == 'empty': if cast_bool_from_str(_filter.value): filter_expressions.append(Q(**{field_name: True})) else: filter_expressions.append(~Q(**{field_name: True})) # starting from not_ elif _filter.operator.startswith("not_"): cast_value(_filter) filter_expressions.append(~Q(**{field_name: _filter.value})) # all others else: cast_value(_filter) filter_expressions.append(Q(**{field_name: _filter.value})) logger.debug(f'Apply filter: {filter_expressions}') if filters.conjunction == ConjunctionEnum.OR: result_filter = Q() for filter_expression in filter_expressions: result_filter.add(filter_expression, Q.OR) queryset = queryset.filter(result_filter) else: for filter_expression in filter_expressions: queryset = queryset.filter(filter_expression) return queryset
5e054ce3ab751f3445d2fc1b2923bb604048c40b
1,194
https://github.com/heartexlabs/label-studio.git
2,694
def apply_filters(queryset, filters, project): if not filters: return queryset # convert conjunction to orm statement filter_expressions = [] for _filter in filters.items: # we can also have annotations filters if not _filter.filter.startswith("filter:tasks:") or _filter.value is None: continue # django orm loop expression attached to column name preprocess_field_name = load_func(settings.PREPROCESS_FIELD_NAME) field_name, _ = preprocess_field_name(_filter.filter, project.only_undefined_field) # filter preprocessing, value type conversion, etc.. preprocess_filter = load_func(settings.DATA_MANAGER_PREPROCESS_FILTER) _filter = preprocess_filter(_filter, field_name) # custom expressions for enterprise custom_filter_expressions = load_func(settings.DATA_MANAGER_CUSTOM_FILTER_EXPRESSIONS) filter_expression = custom_filter_expressions(_filter, field_name) if filter_expression: filter_expressions.append(filter_expression) continue # annotators if field_name == 'annotators' and _filter.operator == Operator.CONTAINS: filter_expressions.append(Q(annotations__completed_by=int(_filter.value))) continue elif field_name == 'annotators' and _filter.operator == Operator.NOT_CONTAINS: filter_expressions.append(~Q(annotations__completed_by=int(_filter.value))) continue elif field_name == 'annotators' and _filter.operator == Operator.EMPTY: value = cast_bool_from_str(_filter.value) filter_expressions.append(Q(annotations__completed_by__isnull=value)) continue # annotations results & predictions results if field_name in ['annotations_results', 'predictions_results']: result = add_result_filter(field_name, _filter, filter_expressions, project) if result == 'exit': return queryset.none() elif result == 'continue': continue # annotation ids if field_name == 'annotations_ids': field_name = 'annotations__id' if 'contains' in _filter.operator: # convert string like "1 2,3" => [1,2,3] _filter.value = [int(value) for value in re.split(',|;| ', _filter.value) if value and value.isdigit()] _filter.operator = 'in_list' if _filter.operator == 'contains' else 'not_in_list' elif 'equal' in _filter.operator: if not _filter.value.isdigit(): _filter.value = 0 # annotators if field_name == 'annotators' and _filter.operator == Operator.CONTAINS: filter_expressions.append(Q(annotations__completed_by=int(_filter.value))) continue elif field_name == 'annotators' and _filter.operator == Operator.NOT_CONTAINS: filter_expressions.append(~Q(annotations__completed_by=int(_filter.value))) continue elif field_name == 'annotators' and _filter.operator == Operator.EMPTY: value = cast_bool_from_str(_filter.value) filter_expressions.append(Q(annotations__completed_by__isnull=value)) continue # predictions model versions if field_name == 'predictions_model_versions' and _filter.operator == Operator.CONTAINS: q = Q() for value in _filter.value: q |= Q(predictions__model_version__contains=value) filter_expressions.append(q) continue elif field_name == 'predictions_model_versions' and _filter.operator == Operator.NOT_CONTAINS: q = Q() for value in _filter.value: q &= ~Q(predictions__model_version__contains=value) filter_expressions.append(q) continue elif field_name == 'predictions_model_versions' and _filter.operator == Operator.EMPTY: value = cast_bool_from_str(_filter.value) filter_expressions.append(Q(predictions__model_version__isnull=value)) continue # use other name because of model names conflict if field_name == 'file_upload': field_name = 'file_upload_field' # annotate with cast to number if need if _filter.type == 'Number' and 
field_name.startswith('data__'): json_field = field_name.replace('data__', '') queryset = queryset.annotate(**{ f'filter_{json_field.replace("$undefined$", "undefined")}': Cast(KeyTextTransform(json_field, 'data'), output_field=FloatField()) }) clean_field_name = f'filter_{json_field.replace("$undefined$", "undefined")}' else: clean_field_name = field_name # special case: predictions, annotations, cancelled --- for them 0 is equal to is_empty=True if clean_field_name in ('total_predictions', 'total_annotations', 'cancelled_annotations') and \ _filter.operator == 'empty': _filter.operator = 'equal' if cast_bool_from_str(_filter.value) else 'not_equal' _filter.value = 0 # get type of annotated field value_type = 'str' if queryset.exists(): value_type = type(queryset.values_list(field_name, flat=True)[0]).__name__ if (value_type == 'list' or value_type == 'tuple') and 'equal' in _filter.operator: raise Exception('Not supported filter type') # special case: for strings empty is "" or null=True if _filter.type in ('String', 'Unknown') and _filter.operator == 'empty': value = cast_bool_from_str(_filter.value) if value: # empty = true q = Q( Q(**{field_name: None}) | Q(**{field_name+'__isnull': True}) ) if value_type == 'str': q |= Q(**{field_name: ''}) if value_type == 'list': q = Q(**{field_name: [None]}) else: # empty = false q = Q( ~Q(**{field_name: None}) & ~Q(**{field_name+'__isnull': True}) ) if value_type == 'str': q &= ~Q(**{field_name: ''}) if value_type == 'list': q = ~Q(**{field_name: [None]}) filter_expressions.append(q) continue # regex pattern check elif _filter.operator == 'regex': try: re.compile(pattern=str(_filter.value)) except Exception as e: logger.info('Incorrect regex for filter: %s: %s', _filter.value, str(e)) return queryset.none() # append operator field_name = f"{clean_field_name}{operators.get(_filter.operator, '')}" # in if _filter.operator == "in
74
2,132
apply_filters
14
0
2
6
homeassistant/components/zha/light.py
303,375
Improve type hints in zha light (#75947)
core
10
Python
14
light.py
async def async_update(self) -> None: if self._transitioning: self.debug("skipping async_update while transitioning") return await self.async_get_state()
9af64b1c3b1a712d24fc7b86ed2cc5e1fa613f26
26
https://github.com/home-assistant/core.git
57
async def async_update(self) -> None: if self._transitioning: self.debug("skipping asy
5
50
async_update
14
1
1
8
tests/openbb_terminal/stocks/discovery/test_finnhub_view.py
285,345
Here we merge all API Refactor related branches (#2236) * Update api.py * Updated forex menu * refactor ycrv command * refactor ycrv command black * refactor ecocal command * Minh changes * Adding space to test pushing * title fix ecocal df * get economic calendar annotation * fix investingcom tests * refactor index command * refactor overview command * give defaults to wsj view function args * rename date args investincom * refacto bigmac command * fix ecocal typo * refactor rtps command * alphavantage gdp * alphavantage gdp per capita * alphavantage cpi * alphavantage tyld * alphavantage inf * refactor macro command * refactor macro command w helpers * refactor treasury command * fix macro on terminal * treasury labels * refactor maturities * update treasury maturities doc strings * refactor get economic calendar finhub * refactor map command api * display map filter choices * route economy api to performance map * route economy api to performance map * display group choices on valuation command * refactor performance and valuation commands * refactor spectrum model and view * add choices to spectrum controller * delete image after view * fix model tests finviz * fix finciz view tests * refactor futures * fix some tests * fix more tests * fix controller test * refactor fred series notes * update fred notes docstring * refacto fred series ids * fix pred and qa when empty datasets * refactor fred * uncomment stuff * refacto get series data * fix some tests * set defaults on args * refactor fred yield curve * black * fix spell and remove ecocal names * fix linting * linting * pylint fix * change dangerous defaults * Working through crypto fixes (#2256) * Working through crypto fixes * Continued adding crypto stuff * Added crypto overview * Added test fixes * Added fixtures * Fixed tests * Fixed charting issue * Removed broken APIs * Final adjustments * Added test fixes * map get groups and get ycrv countries into old api * exposed econdb helper funcs * remove helpers * refactor search indices * linting * refactor arg currency * pylint from currency * Started switching crpyto ascending to ascend * Merging * Portfolio model arguements, params, and docstring * Refactored for etf commands (#2292) * Refactored for etf commands * Fixed tests * Added load command * Fixed menu * Portfolio logic fixes * Added econometrics (#2260) * Added econometrics * Fixed tests * Simplified API * Added test fixes * Added test csv * Allowed examples to be loaded * Fund refactor (#2291) * Fund refactor * Changed fund_name and fund to name * Changed ascending to ascend * Stock menu refactoring for easier API usage (#2194) * Stocks refactoring for easier API usage * Linting * Refactor newly added features * Linting * Fixing tests * Refactor common files used by stocks menu * Fixing flake8 * Fix linting and tests * Linting * Fix flake8 * refactor insider_data * refactor mentions * refactor watchlist * refactor sentiment * refactor sentiment * fix yahoofinance tests * refactor load and candle * refactor get_news and display_news * refactor stocks.ins.act * candle default matplotlib * fix yahoofinance_view tests * fix ark model tests * fix ark view tests * fix business insider model * fix business insider view * refactor csimarket model * fix tests csi market model * update dd controller * fix get suppliers tests * fix dd controller tests * fix finhub tests * fix finviz tests * fix fmp tests * fix marketwatch tests * corrected argument keywords in test_bt_model * corrected argument keywords in test_bt_view * refactor fa 
controller * refactor marketwatch view * refactor gov controller * fix tests fa av * fix tests elect * fix dcf tests * fix polygon tests * fix fmp tests * fix quiverquant tests * fix yahoofinance fa tests * fix more fa tests * fix insider tests * fix more tests * fix more tests * fix options tests * fix stock gov tests * fix tests test_ba_controller * fix tests for test_finviz_compare_model.py * fixed 2 tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fix final tests * fixed tests * fixed tests * Fix tests * black * forgot to black tests * fixed tests * fixed tests * fixed tests * fixed tests * flakefix * Tests + code : Stocks / Discovery * fix tests * added recorder * fixed tests * fixed tests * black * black * remove unused imports * refactor display raw * sia dicts fix * pylint * linting * remove dangerous default * fix tests * fix beta model test * black * skip screener qa test * change sector path to sectors * update tests readme * fix metric defaults * black * substitute lost ticker * defaults cpic * another round on sia * refactor cramer * reduce default tweets on sentiment * refactor yf hist, corr, volume * arkorders default * refactor income, balance, cashflow * refacto scorr, screener, getfinnhub * refactor stockgrid * ibkr refactor * another round on stockgrid * add dividens end point * refactor discovery endpoints * update docstrings with similar input * refactor messages * refactor ba * refactor regioons * refactor twitter sentiment * refactor hist * refactor regions * give default to timeframe * refactor bunch of defaults and arg names * remove leftover imports * refactor vwap * let tests run * fix tests * fix stock tests * fix stockanalysis tests * flake * MYPY * Made important changes * added fixes * Fixed big issue * Added fixes to tests * fix qa tests * fix tests * fix 1 more test * last stocks failing * fix crypto test Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: colin99d <colin99delahunty@gmail.com> * fix portfolio tests * change period to window * update ca docstrings * refactor get_similar_companies func * Fixed * Update CI * Update CI 2 * Update CI 3 * Update dependencies Co-authored-by: colin99d <colin99delahunty@gmail.com> Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: James Simmons <simmonsj330@gmail.com> Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> Co-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt>
OpenBBTerminal
10
Python
13
test_finnhub_view.py
def test_past_ipo_empty_df(mocker): mocker.patch( "openbb_terminal.stocks.discovery.finnhub_view.finnhub_model.get_ipo_calendar", return_value=pd.DataFrame(), ) finnhub_view.past_ipo( num_days_behind=2, start_date="2021-12-01", limit=20, export="" ) @pytest.mark.vcr(record_mode="none") @pytest.mark.record_stdout
9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b
@pytest.mark.vcr(record_mode="none") @pytest.mark.record_stdout
40
https://github.com/OpenBB-finance/OpenBBTerminal.git
44
def test_past_ipo_empty_df(mocker): mocker.patch(
17
95
test_past_ipo_empty_df
77
0
1
25
sklearn/semi_supervised/tests/test_self_training.py
259,194
MNT Replace if_delegate_has_method with available_if in ensemble and semi_supervised (#20545) Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com> Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
scikit-learn
14
Python
53
test_self_training.py
def test_base_estimator_meta_estimator(): # Check that a meta-estimator relying on an estimator implementing # `predict_proba` will work even if it does expose this method before being # fitted. # Non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/19119 base_estimator = StackingClassifier( estimators=[ ("svc_1", SVC(probability=True)), ("svc_2", SVC(probability=True)), ], final_estimator=SVC(probability=True), cv=2, ) assert hasattr(base_estimator, "predict_proba") clf = SelfTrainingClassifier(base_estimator=base_estimator) clf.fit(X_train, y_train_missing_labels) clf.predict_proba(X_test) base_estimator = StackingClassifier( estimators=[ ("svc_1", SVC(probability=False)), ("svc_2", SVC(probability=False)), ], final_estimator=SVC(probability=False), cv=2, ) assert not hasattr(base_estimator, "predict_proba") clf = SelfTrainingClassifier(base_estimator=base_estimator) with pytest.raises(AttributeError): clf.fit(X_train, y_train_missing_labels)
a794c58692a1f3e7a85a42d8c7f7ddd5fcf18baa
155
https://github.com/scikit-learn/scikit-learn.git
231
def test_base_estimator_meta_estimator(): # Check that a meta-estimator relying on an estimator implementing # `predict_proba` will work even if it does expose this method before being # fitted. # Non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/19119 base_estimator = StackingClassifier( estimators=[ ("svc_1", SVC(probability=True)), ("svc_2", SVC(probability=True)), ], final_estimator=SVC(probability=True), cv=2, ) assert hasat
19
250
test_base_estimator_meta_estimator
45
0
4
16
rllib/tests/test_io.py
128,982
[RLlib] QMIX better defaults + added to CI learning tests (#21332)
ray
15
Python
39
test_io.py
def test_agent_input_eval_sim(self): for fw in framework_iterator(): self.write_outputs(self.test_dir, fw) agent = PGTrainer( env="CartPole-v0", config={ "input": self.test_dir + fw, "input_evaluation": ["simulation"], "framework": fw, }) for _ in range(50): result = agent.train() if not np.isnan(result["episode_reward_mean"]): return # simulation ok time.sleep(0.1) assert False, "did not see any simulation results"
abd3bef63b486fe3e43c1608d93205a702880414
93
https://github.com/ray-project/ray.git
262
def test_agent_input_eval_sim(self): for fw in framework_iterator(): self.write_outputs(self.test_dir, fw) agent = PGTrainer( env="CartPole-v0", config={ "input": self.test_dir + fw
18
154
test_agent_input_eval_sim
97
0
6
27
ppdet/modeling/heads/s2anet_head.py
211,220
refactor s2anet (#6604) * refactor s2anet to support batch_size > 1 * fix problem of inference * support batch_size > 1 for training * fix empty results * fix dota eval * fix configs of s2anet_head * modify s2anet_spine_1x to 73 mAP
PaddleDetection
18
Python
62
s2anet_head.py
def get_bboxes_single(self, cls_score_list, bbox_pred_list): mlvl_bboxes = [] mlvl_scores = [] for cls_score, bbox_pred in zip(cls_score_list, bbox_pred_list): if self.use_sigmoid_cls: scores = F.sigmoid(cls_score) else: scores = F.softmax(cls_score, axis=-1) if scores.shape[0] > self.nms_pre: # Get maximum scores for foreground classes. if self.use_sigmoid_cls: max_scores = paddle.max(scores, axis=1) else: max_scores = paddle.max(scores[:, :-1], axis=1) topk_val, topk_inds = paddle.topk(max_scores, self.nms_pre) bbox_pred = paddle.gather(bbox_pred, topk_inds) scores = paddle.gather(scores, topk_inds) mlvl_bboxes.append(bbox_pred) mlvl_scores.append(scores) mlvl_bboxes = paddle.concat(mlvl_bboxes) mlvl_scores = paddle.concat(mlvl_scores) mlvl_polys = rbox2poly(mlvl_bboxes).unsqueeze(0) mlvl_scores = paddle.transpose(mlvl_scores, [1, 0]).unsqueeze(0) bbox, bbox_num, _ = self.nms(mlvl_polys, mlvl_scores) if bbox.shape[0] <= 0: bbox = self.fake_bbox bbox_num = self.fake_bbox_num return bbox, bbox_num
b4727677751081b257c6fa23c3c124ab9e5a32a1
237
https://github.com/PaddlePaddle/PaddleDetection.git
401
def get_bboxes_single(self, cls_score_list, bbox_pred_list): mlvl_bboxes = [] mlvl_scores = [] for cls_score, bbox_pred in zip(cls_score_list, bbox_pred_list): if self.use_sigmoid_cls: scores = F.sigmoid(cls_score) else: scores = F.softmax(cls_score, axis=-1) if scores.shape[0] > self.nms_pre: # Get maximum scores for foreground classes. if self.use_sigmoid_cls: max_scores = paddle.max(scores, axis=1) else: max_scores = paddle.max(scores[:, :-1], axis=1) topk_val, topk_inds = paddle.topk(max_scores, self.nms_pre) bbox_pred = paddle.gather(bbox_pred, topk_inds) scores = paddle.gather(scores, topk_inds) mlvl_bboxes.append(bbox_pred) mlvl_scores.append(scores) mlvl_bboxes = paddle.concat(mlvl_bboxes) mlvl_scores = paddle.concat(ml
36
367
get_bboxes_single
26
0
1
15
tests/components/onvif/__init__.py
296,882
Add diagnostics to ONVIF (#69708)
core
9
Python
21
__init__.py
def setup_mock_device(mock_device): mock_device.async_setup = AsyncMock(return_value=True) mock_device.available = True mock_device.name = NAME mock_device.info = DeviceInfo( MANUFACTURER, MODEL, FIRMWARE_VERSION, SERIAL_NUMBER, MAC, ) mock_device.capabilities = Capabilities() mock_device.profiles = []
22db21b9d4a7c5c2a79d43d8edf5295aeb99c13d
63
https://github.com/home-assistant/core.git
85
def setup_mock_device(mock_device): mock_device.async_setup = AsyncMock(return_value=True) mock_device.a
18
89
setup_mock_device
28
0
1
10
tests/sentry/integrations/msteams/test_message_builder.py
93,205
test(msteams): Add tests for building group card (#36834) Add tests for build_group_card which builds issues cards. Does NOT test all visual aspects of the card. Only ensures that certain important elements are present and the basic structure of the card is correct.
sentry
11
Python
23
test_message_builder.py
def test_resolved_issue_message(self): self.group1.status = GroupStatus.RESOLVED self.group1.save() issue_card = build_group_card( group=self.group1, event=self.event1, rules=self.rules, integration=self.integration ) action_set = issue_card["body"][2]["items"][0] resolve_action = action_set["actions"][0] assert ActionType.SUBMIT == resolve_action["type"] assert "Unresolve" == resolve_action["title"]
db35e231ceababe8c9f5ca7b5d2ca685f07c7d5b
89
https://github.com/getsentry/sentry.git
94
def test_resolved_issue_message(self): self.group1.status =
18
144
test_resolved_issue_message
39
0
1
8
tests/models/mvp/test_modeling_mvp.py
31,875
Add MVP model (#17787) * Add MVP model * Update README * Remove useless module * Update docs * Fix bugs in tokenizer * Remove useless test * Remove useless module * Update vocab * Remove specifying * Remove specifying * Add #Copied ... statement * Update paper link * Remove useless TFMvp * Add #Copied ... statement * Fix style in test mvp model * Fix some typos * Fix properties of unset special tokens in non verbose mode * Update paper link * Update MVP doc * Update MVP doc * Fix README * Fix typos in docs * Update docs
transformers
13
Python
33
test_modeling_mvp.py
def test_shift_tokens_right(self): input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long) shifted = shift_tokens_right(input_ids, 1, 2) n_pad_before = input_ids.eq(1).float().sum() n_pad_after = shifted.eq(1).float().sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(torch.eq(shifted[:, 0], 2).all())
3cff4cc58730409c68f8afa2f3b9c61efa0e85c6
137
https://github.com/huggingface/transformers.git
87
def test_shift_tokens_right(self): input_ids = torch.tensor([[71, 82, 18, 33,
18
200
test_shift_tokens_right
21
0
1
7
python3.10.4/Lib/distutils/tests/test_archive_util.py
223,027
add python 3.10.4 for windows
XX-Net
10
Python
19
test_archive_util.py
def test_make_archive_tar(self): base_dir = self._create_files() base_name = os.path.join(self.mkdtemp() , 'archive') res = make_archive(base_name, 'tar', base_dir, 'dist') self.assertTrue(os.path.exists(res)) self.assertEqual(os.path.basename(res), 'archive.tar') self.assertEqual(self._tarinfo(res), self._created_files)
8198943edd73a363c266633e1aa5b2a9e9c9f526
83
https://github.com/XX-net/XX-Net.git
63
def test_make_archive_tar(self): base_dir = self._create_files() base_name = os.path.join(self.mkdtemp() , 'archive') res = make_archive(base_name, 'tar', base_dir, 'dist') self.assertTrue(os.path.exists(res)) self.assertEqual(os.path.basename(res), 'archive.tar') self.assertEqual(self._tarinfo(res), self._created_files)
17
137
test_make_archive_tar
23
0
3
6
homeassistant/components/zha/lock.py
295,087
Bump zigpy to 0.44.1 and zha-quirks to 0.0.69 (#68921) * Make unit tests pass * Flip response type check to not rely on it being a list https://github.com/zigpy/zigpy/pull/716#issuecomment-1025236190 * Bump zigpy and quirks versions to ZCLR8 releases * Fix renamed zigpy cluster attributes * Handle the default response for ZLL `get_group_identifiers` * Add more error context to `stage failed` errors * Fix unit test returning lists as ZCL request responses * Always load quirks when testing ZHA * Bump zha-quirks to 0.0.69
core
10
Python
23
lock.py
async def async_lock(self, **kwargs): result = await self._doorlock_channel.lock_door() if isinstance(result, Exception) or result[0] is not Status.SUCCESS: self.error("Error with lock_door: %s", result) return self.async_write_ha_state()
0f6296e4b520ec8daf0f12e7b6db3c863c811ae8
51
https://github.com/home-assistant/core.git
73
async def async_lock(self, **kwargs): result = await self._doorlock_channel.lock_door() if isinstance(result, Exception) or result[0] is not Status.SUCCESS:
12
86
async_lock
11
0
1
2
src/transformers/utils/fx.py
38,741
Traced models serialization and torchscripting fix (#17206) * Fix torch.jit.script and pickling issues * Fix get_attr issues * Fix import in function * Fix GPT-J and T5 tracing for torch=1.11 * Gate graph surgery on torch version * Modeling minor changes to enable TorchScripting * Model serialization / deserialization test * Remove _assert_is_none users
transformers
8
Python
10
fx.py
def __setitem__(self, indices, values): return self.tracer.create_proxy("call_function", operator.setitem, (self, indices, values), {})
2e7e4280aa6f380a4e3afad6524295a17901c56c
33
https://github.com/huggingface/transformers.git
17
def __setitem__(self, indices, values): return self.tracer.create_proxy("call_function", operator.setitem, (self, in
8
47
__setitem__
6
0
1
4
keras/optimizers/optimizers_test.py
275,632
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
12
Python
6
optimizers_test.py
def test_rmsprop(self): with self.cached_session(): self._test_optimizer(optimizer_v1.RMSprop()) self._test_optimizer(optimizer_v1.RMSprop(decay=1e-3))
84afc5193d38057e2e2badf9c889ea87d80d8fbf
37
https://github.com/keras-team/keras.git
34
def test_rmsprop(self): with self.cached_session(): self._te
7
61
test_rmsprop
18
0
1
6
homeassistant/components/ws66i/media_player.py
300,151
Add ws66i core integration (#56094) * Add ws66i core integration * Remove all ws66i translations * Update ws66i unit tests to meet minimum code coverage * Update ws66i based on @bdraco review * General improvements after 2nd PR review * Disable entities if amp shutoff, set default source names, set 30sec polling * Add _attr_ and change async_on_unload * Improve entity generation * Implement coordinator * Made options fields required, retry connection on failed attempts, use ZoneStatus for attributes * Refactor WS66i entity properties, raise HomeAssistantError on restore service if no snapshot * Update to pyws66i v1.1 * Add quality scale of silver to manifest * Update config_flow test
core
11
Python
15
media_player.py
async def async_set_volume_level(self, volume): await self.hass.async_add_executor_job( self._ws66i.set_volume, self._zone_id, int(volume * MAX_VOL) ) self._status.volume = int(volume * MAX_VOL) self._async_update_attrs_write_ha_state()
5e737bfe4fbc5a724f5fdf04ea9319c2224cb114
49
https://github.com/home-assistant/core.git
64
async def async_set_volume_level(self, volume): await self.hass.async_add_executor_job( self._ws66i.set_volume, self._zone_id, int(
12
80
async_set_volume_level
29
0
1
8
rllib/examples/two_trainer_workflow.py
147,686
[RLlib] Examples folder: All `training_iteration` translations. (#23712)
ray
10
Python
28
two_trainer_workflow.py
def get_default_config(cls) -> TrainerConfigDict: # Run this Trainer with new `training_iteration` API and set some PPO-specific # parameters. return with_common_config( { "_disable_execution_plan_api": True, "num_sgd_iter": 10, "sgd_minibatch_size": 128, } )
434265edd0926f7838cca4dbce00e88149e6bbf0
25
https://github.com/ray-project/ray.git
123
def get_default_config(cls) -> TrainerConfigDict: # Run this Trainer with new `training_iteration` API and set some PPO-specific # parameters. return with_common_config(
4
45
get_default_config
7
0
1
3
python/ray/_private/runtime_env/utils.py
129,516
[1/n][cross-language runtime env] runtime env protobuf refactor (#21551) We need to support runtime env for java、c++ and cross-language. This PR only do a refactor of protobuf. Related issue #21731
ray
9
Python
7
utils.py
def has_conda(self) -> str: return self._proto_runtime_env.python_runtime_env.HasField( "conda_runtime_env")
f8e41215b3fc8f45660e6afac4fe6faad73287f4
18
https://github.com/ray-project/ray.git
24
def has_conda(self) -> str: return self._proto_runtime_env.python_runtime_env.HasField( "conda_runtime_en
6
31
has_conda
117
0
3
39
erpnext/regional/report/eway_bill/eway_bill.py
67,202
style: format code with black
erpnext
18
Python
98
eway_bill.py
def get_data(filters): conditions = get_conditions(filters) data = frappe.db.sql( % conditions, as_dict=1, ) unit = { "Bag": "BAGS", "Bottle": "BOTTLES", "Kg": "KILOGRAMS", "Liter": "LITERS", "Meter": "METERS", "Nos": "NUMBERS", "PKT": "PACKS", "Roll": "ROLLS", "Set": "SETS", } # Regular expression set to remove all the special characters special_characters = r"[$%^*()+\\[\]{};':\"\\|<>.?]" for row in data: set_defaults(row) set_taxes(row, filters) set_address_details(row, special_characters) # Eway Bill accepts date as dd/mm/yyyy and not dd-mm-yyyy row.posting_date = "/".join(str(row.posting_date).replace("-", "/").split("/")[::-1]) row.lr_date = "/".join(str(row.lr_date).replace("-", "/").split("/")[::-1]) if row.gst_vehicle_type == "Over Dimensional Cargo (ODC)": row.gst_vehicle_type = "ODC" row.item_name = re.sub(special_characters, " ", row.item_name) row.description = row.item_name row.uom = unit.get(row.uom, row.uom) # For removing special charactes and numbers from customer. row.customer = re.sub(special_characters[:-1] + "&0-9" + "]", "", row.customer) return data
494bd9ef78313436f0424b918f200dab8fc7c20b
235
https://github.com/frappe/erpnext.git
83
def get_data(filters): conditions = get_conditions(filters) data = frappe.db.sql( % conditions, as_dict=1, ) unit = { "Bag": "BAGS", "Bottle": "BOTTLES", "Kg": "KILOGRAMS", "Liter": "LITERS", "Meter": "METERS", "Nos": "NUMBERS", "PKT": "PACKS", "Roll": "ROLLS", "Set": "SETS", } # Regular expression set to remove all the special characters special_characters
29
423
get_data
18
0
1
9
tests/sentry/replays/test_organization_issue_replay_count.py
89,383
feat(replays): Add issue replay count endpoint (#41996) ## What this PR does We create an endpoint that provides accurate counts of replay_ids associated with issue ids within a timeframe. The endpoint will return a format like so: ``` { issue_id_1: 1, issue_id_2: 20, etc. etc. } ``` ### Constraints - Between 1 and 25 issue_ids may be passed in (25 being the current pagination size of the issue stream) - We will only count up to 50 replay_ids, 50 chosen somewhat arbitrarily but seems a reasonable size. - We will only retrieve up to 100 replay_ids per issue_id. this means that if over half of replay_ids are sampled, it's possible that we may undercount replays as we will miss some. This is a small edge case and is acceptable. ## Modifications: - We modify discover.py to allow for private_use of the `groupArray` clickhouse function - In our endpoint we use QueryBuilder to hit the discover dataset, then query the replays dataset with the replay_ids returned - We then count up each replay_id confirmed to exist by its associated issue_id, and return the resulting dict ### Why are we doing this? Because of sampling / dropped data, events can have replay_ids that don't exist. this is nominally fine, although it results in missing counts / the product not seeming trustworthy in places. Fixes https://github.com/getsentry/replay-backend/issues/190
sentry
14
Python
16
test_organization_issue_replay_count.py
def setUp(self): super().setUp() self.min_ago = before_now(minutes=1) self.login_as(user=self.user) self.url = reverse( "sentry-api-0-organization-issue-replay-count", kwargs={"organization_slug": self.project.organization.slug}, ) self.features = {"organizations:session-replay": True}
6e2d3d461e9638981b6619952f59f78e44a93917
64
https://github.com/getsentry/sentry.git
81
def setUp(self): super().setUp() self.min_ago = before_now(minutes=1) self.login_as(user=self.user) self.url = reverse( "sentry-api-0-organization-issue-replay-count",
15
108
setUp
66
1
1
16
tests/models/opt/test_modeling_opt.py
30,686
Fix expected value for OPT test `test_inference_no_head` (#17395) * Fix expected value * 5e-5 Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
transformers
12
Python
61
test_modeling_opt.py
def test_inference_no_head(self): model = OPTModel.from_pretrained("facebook/opt-350m").to(torch_device) input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) with torch.no_grad(): output = model(input_ids=input_ids).last_hidden_state expected_shape = torch.Size((1, 11, 512)) self.assertEqual(output.shape, expected_shape) # expected value works for CPU, as well as GPU (with TF32 disabled) expected_slice = torch.tensor( [ [-0.28726277, -1.9241608, -0.3058734], [-1.2737825, -0.13332152, -0.18766522], [0.41159445, 0.1191957, -1.3107123], ], device=torch_device, ) assert_tensors_close(output[0, :3, :3], expected_slice, atol=5e-5) @require_torch @slow
4d727bd2dff377caeab21ff4e1bf4b26c2397c8a
@require_torch @slow
173
https://github.com/huggingface/transformers.git
215
def test_inference_no_head(self): model = OPTModel.from_pretrained("facebook/opt-350m").to(torch_device) i
24
229
test_inference_no_head
49
0
1
11
jina/parsers/orchestrate/runtimes/remote.py
13,648
docs: fix port and protocol description for the gateway (#5456) Co-authored-by: Jina Dev Bot <dev-bot@jina.ai>
jina
14
Python
43
remote.py
def mixin_gateway_protocol_parser(parser): from jina.enums import GatewayProtocolType parser.add_argument( '--protocol', '--protocols', nargs='+', type=GatewayProtocolType.from_string, choices=list(GatewayProtocolType), default=[GatewayProtocolType.GRPC], help=f'Communication protocol of the server exposed by the Gateway. This can be a single value or a list of protocols, depending on your chosen Gateway. Choose the convenient protocols from: {[protocol.to_string() for protocol in list(GatewayProtocolType)]}.', )
47da80beca9cd60db51081594f1fcc5bee6d1246
51
https://github.com/jina-ai/jina.git
110
def mixin_gateway_protocol_parser(parser): from jina.enums import GatewayProtocolType parser.add_argument( '--protocol', '--protocols', nargs='+', type=GatewayProtocolType.from_string, choices=l
16
106
mixin_gateway_protocol_parser
25
0
1
5
tests/test_console.py
161,881
Test to ensure NullFile set as Console.file when stdout null
rich
8
Python
24
test_console.py
def test_console_null_file(monkeypatch): # When stdout and stderr are null, Console.file should be replaced with NullFile monkeypatch.setattr("sys.stdout", None) monkeypatch.setattr("sys.stderr", None) console = Console() assert isinstance(console.file, NullFile)
97bffbc7b1640dfc7bc20809dc0b9d1b536d7644
35
https://github.com/Textualize/rich.git
39
def test_console_null_file(monkeypatch): # When stdout and stderr are null, Console.file should be replaced with NullFile monkeypatch.setattr("sys.stdout", None)
8
60
test_console_null_file
49
0
1
11
wagtail/admin/tests/pages/test_delete_page.py
71,510
Reformat with black
wagtail
10
Python
38
test_delete_page.py
def setUp(self): # Find root page self.root_page = Page.objects.get(id=2) # Add child page self.child_page = SimplePage( title="Hello world!", slug="hello-world", content="hello" ) self.root_page.add_child(instance=self.child_page) # Add a page with child pages of its own self.child_index = StandardIndex(title="Hello index", slug="hello-index") self.root_page.add_child(instance=self.child_index) self.grandchild_page = StandardChild(title="Hello Kitty", slug="hello-kitty") self.child_index.add_child(instance=self.grandchild_page) # Login self.user = self.login()
d10f15e55806c6944827d801cd9c2d53f5da4186
110
https://github.com/wagtail/wagtail.git
150
def setUp(self): # Find root page self.root_page = Page.objects.get(id=2) # Add child page self.child_page = SimplePage( title="Hello world!", slug="hello-world", content="hello" ) self.root_page.add_child(instance=self.child_page) # Add a page with child pages of its own self.child_index = StandardIndex(title="Hello index", slug="hello-index") self.root_page.add_child(instance=self
20
189
setUp
16
0
1
8
tests/test_client.py
54,216
Fix bug where log workers would access a cached client across event loops
prefect
10
Python
14
test_client.py
async def test_client_can_opt_out_of_lifespan_management(self): startup, shutdown = MagicMock(), MagicMock() app = FastAPI(on_startup=[startup], on_shutdown=[shutdown]) client = OrionClient(app, manage_ephemeral_lifespan=False)
ef229fdb02297bcfff6aa95f210ce73e35074b99
57
https://github.com/PrefectHQ/prefect.git
36
async def test_client_can_opt_out_of_lifespan_management(self): startup, shutdown = MagicMock(), MagicMock() app = FastAPI(on_startup=[startup], on_shutdown=[shutdown]
12
68
test_client_can_opt_out_of_lifespan_management
53
1
3
14
pandas/tests/resample/test_base.py
164,179
TST: Use more xfail instead of skip (#45719)
pandas
14
Python
48
test_base.py
def test_resample_empty_series(freq, empty_series_dti, resample_method, request): # GH12771 & GH12868 if resample_method == "ohlc" and isinstance(empty_series_dti.index, PeriodIndex): request.node.add_marker( pytest.mark.xfail( reason=f"GH13083: {resample_method} fails for PeriodIndex" ) ) ser = empty_series_dti result = getattr(ser.resample(freq), resample_method)() expected = ser.copy() expected.index = _asfreq_compat(ser.index, freq) tm.assert_index_equal(result.index, expected.index) assert result.index.freq == expected.index.freq tm.assert_series_equal(result, expected, check_dtype=False) @all_ts @pytest.mark.parametrize("freq", ["M", "D", "H"])
bfe2d528a9398679acf05ffcdc60d3c181e0f17e
@all_ts @pytest.mark.parametrize("freq", ["M", "D", "H"])
116
https://github.com/pandas-dev/pandas.git
128
def test_resample_empty_series(freq, empty_series_dti, resample_method, request): # GH12771 & GH12868 if resample_method == "ohlc" and i
27
219
test_resample_empty_series
28
0
3
7
torch/testing/_comparison.py
102,547
make meta tensor data access error message for expressive in assert_close (#68802) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/68802 Without this patch, the error message of comparing meta tensors looks like this after #68722 was merged: ```python >>> t = torch.empty((), device="meta") >>> assert_close(t, t) NotImplementedError: Could not run 'aten::abs.out' with arguments from the 'Meta' backend. [...] [...] The above exception was the direct cause of the following exception: [...] RuntimeError: Comparing TensorLikePair( id=(), actual=tensor(..., device='meta', size=()), expected=tensor(..., device='meta', size=()), rtol=1.3e-06, atol=1e-05, equal_nan=False, check_device=True, check_dtype=True, check_layout=True, check_stride=False, check_is_coalesced=True, ) resulted in the unexpected exception above. If you are a user and see this message during normal operation please file an issue at https://github.com/pytorch/pytorch/issues. If you are a developer and working on the comparison functions, please except the previous error and raise an expressive `ErrorMeta` instead. ``` Thus, we follow our own advice and turn it into an expected exception until #68592 is resolved: ```python >>> t = torch.empty((), device="meta") >>> assert_close(t, t) ValueError: Comparing meta tensors is currently not supported ``` Test Plan: Imported from OSS Reviewed By: ngimel Differential Revision: D33542999 Pulled By: mruberry fbshipit-source-id: 0fe1ddee15b5decdbd4c5dd84f03804ca7eac95b
pytorch
13
Python
26
_comparison.py
def _handle_meta_tensor_data_access(self): try: yield except NotImplementedError as error: if "meta" not in str(error).lower(): raise error # TODO: See https://github.com/pytorch/pytorch/issues/68592 raise self._make_error_meta(NotImplementedError, "Comparing meta tensors is currently not supported.")
8d05174defd689cb1cb2346e0cde5b7fa572814a
38
https://github.com/pytorch/pytorch.git
108
def _handle_meta_tensor_data_access(self): try: yield except NotImplementedError as error: if "meta" not in str(error).lower(): raise error # TODO: See https://github.com/pytorch/pytorch/issues/68592 raise self._make_error_meta(NotImplementedError, "Compa
7
71
_handle_meta_tensor_data_access
105
0
7
26
homeassistant/components/auth/login_flow.py
313,425
Add FlowResultType enum to data entry flow (#72955)
core
15
Python
80
login_flow.py
async def _async_flow_result_to_response(self, request, client_id, result): if result["type"] != data_entry_flow.FlowResultType.CREATE_ENTRY: # @log_invalid_auth does not work here since it returns HTTP 200. # We need to manually log failed login attempts. if ( result["type"] == data_entry_flow.FlowResultType.FORM and (errors := result.get("errors")) and errors.get("base") in ( "invalid_auth", "invalid_code", ) ): await process_wrong_login(request) return self.json(_prepare_result_json(result)) result.pop("data") hass: HomeAssistant = request.app["hass"] result_obj: Credentials = result.pop("result") # Result can be None if credential was never linked to a user before. user = await hass.auth.async_get_user_by_credentials(result_obj) if user is not None and ( user_access_error := async_user_not_allowed_do_auth(hass, user) ): return self.json_message( f"Login blocked: {user_access_error}", HTTPStatus.FORBIDDEN ) await process_success_login(request) result["result"] = self._store_result(client_id, result_obj) return self.json(result)
f91aa33c5f7bc13ed031a95f946f70e11af1e2f3
169
https://github.com/home-assistant/core.git
420
async def _async_flow_result_to_response(self, request, client_id, result): if result["type"] != data_entry_flow.FlowResultType.CREATE_ENTRY: # @log_invalid_auth does not work here since it returns HTTP 200. # We need to manually log failed login attempts. if ( result["type"] == data_entry_flow.FlowResultType.FORM and (errors := result.get("errors")) and errors.get("base") in ( "invalid_auth", "invalid_code", ) ): await process_wrong_login(request) return self.json(_prepare_result_json(result)) result.pop("data") hass: HomeAssistant = request.app["hass"] result_obj: Credentials = result.pop("result") # Result can be None if credential was never linked to a user before. user = await hass.auth.async_get_user_by_credentials(result_obj) if user is not None and ( user_access_error := async_user_not_allowed_do_auth(hass, user)
30
289
_async_flow_result_to_response
23
0
2
8
homeassistant/components/avea/light.py
318,227
Improve type hints in light [a-i] (#75936) * Improve type hints in ads light * Improve type hints in avea light * Improve type hints in avion light * Improve type hints in broadlink light * More type hints * One more
core
13
Python
20
light.py
def update(self) -> None: if (brightness := self._light.get_brightness()) is not None: self._attr_is_on = brightness != 0 self._attr_brightness = round(255 * (brightness / 4095))
20fec104e2a11b1a5164d7fe779eb0d894e098cf
45
https://github.com/home-assistant/core.git
59
def update(self) -> None:
8
74
update
117
0
4
60
src/documents/consumer.py
319,877
When raising an exception during exception handling, chain them together for slightly cleaner logs
paperless-ngx
21
Python
84
consumer.py
def run_post_consume_script(self, document): if not settings.POST_CONSUME_SCRIPT: return if not os.path.isfile(settings.POST_CONSUME_SCRIPT): self._fail( MESSAGE_POST_CONSUME_SCRIPT_NOT_FOUND, f"Configured post-consume script " f"{settings.POST_CONSUME_SCRIPT} does not exist.", ) self.log( "info", f"Executing post-consume script {settings.POST_CONSUME_SCRIPT}", ) script_env = os.environ.copy() script_env["DOCUMENT_ID"] = str(document.pk) script_env["DOCUMENT_CREATED"] = str(document.created) script_env["DOCUMENT_MODIFIED"] = str(document.modified) script_env["DOCUMENT_ADDED"] = str(document.added) script_env["DOCUMENT_FILE_NAME"] = document.get_public_filename() script_env["DOCUMENT_SOURCE_PATH"] = os.path.normpath(document.source_path) script_env["DOCUMENT_ARCHIVE_PATH"] = os.path.normpath( str(document.archive_path), ) script_env["DOCUMENT_THUMBNAIL_PATH"] = os.path.normpath( document.thumbnail_path, ) script_env["DOCUMENT_DOWNLOAD_URL"] = reverse( "document-download", kwargs={"pk": document.pk}, ) script_env["DOCUMENT_THUMBNAIL_URL"] = reverse( "document-thumb", kwargs={"pk": document.pk}, ) script_env["DOCUMENT_CORRESPONDENT"] = str(document.correspondent) script_env["DOCUMENT_TAGS"] = str( ",".join(document.tags.all().values_list("name", flat=True)), ) try: Popen( ( settings.POST_CONSUME_SCRIPT, str(document.pk), document.get_public_filename(), os.path.normpath(document.source_path), os.path.normpath(document.thumbnail_path), reverse("document-download", kwargs={"pk": document.pk}), reverse("document-thumb", kwargs={"pk": document.pk}), str(document.correspondent), str(",".join(document.tags.all().values_list("name", flat=True))), ), env=script_env, ).wait() except Exception as e: self._fail( MESSAGE_POST_CONSUME_SCRIPT_ERROR, f"Error while executing post-consume script: {e}", exc_info=True, exception=e, )
b70e21a6d50bfc84e76fd68ce0b8c22b0928ff51
383
https://github.com/paperless-ngx/paperless-ngx.git
781
def run_post_consume_script(self, document): if not settings.POST_CONSUME_SCRIPT: return if not os.path.isfile(settings.POST_CONSUME_SCRIPT): self._fail( MESSAGE_POST_CONSUME_SCRIPT_NOT_FOUND, f"Configured post-consume script " f"{settings.POST_CONSUME_SCRIPT} does not exist.", ) self.log( "info", f"Executing post-consume script {settings.POST_CONSUME_SCRIPT}", ) script_env = os.environ.copy() script_env["DOCUMENT_ID"] = str(document.pk) script_env["DOCUMENT_CREATED"] = str(document.created) script_env["DOCUMENT_MODIFIED"] = str(document.modified) script_env["DOCUMENT_ADDED"] = str(document.added) script_env["DOCUMENT_FILE_NAME"] = document.get_public_filename() script_env["DOCUMENT_SOURCE_PATH"] = os.path.normpath(document.source_path)
40
648
run_post_consume_script
39
0
1
15
tests/sentry/snuba/metrics/test_metrics_layer/test_metrics_enhanced_performance.py
88,393
feat(metrics): Add support for wildcard searches (#41114)
sentry
12
Python
34
test_metrics_enhanced_performance.py
def test_broken_custom_metric(self, mock): # Store valid metric self.store_transaction_metric( 1, metric="measurements.something_custom", internal_metric="d:transactions/measurements.something_custom@millisecond", entity="metrics_distributions", timestamp=self.day_ago + timedelta(hours=1, minutes=0), ) # mock mri failing to parse the metric mock.return_value = None result = get_custom_measurements( project_ids=[self.project.id], organization_id=self.organization.id, start=self.day_ago, ) assert result == []
f322fa798595b3bc7dc54e904c9628e44102f1f5
82
https://github.com/getsentry/sentry.git
182
def test_broken_custom_metric(self, mock): # Store valid metric self.store_transaction_metric( 1, metric="measurements.something_custom", internal_metric="d:transactions/measurements.something_custom@millisecond", entity="metrics_distributions", timestamp=self.day_ago + timedelta(hours=1, minutes=0), ) # mock mri failing to parse the metric mock.return_value = None result = get_custom_measurements( project_ids=[self.project.id], organiz
21
127
test_broken_custom_metric
95
1
3
30
tests/components/wemo/conftest.py
301,222
Use properties of wemo Maker device (#72378)
core
10
Python
67
conftest.py
def create_pywemo_device(pywemo_registry, pywemo_model): cls = getattr(pywemo, pywemo_model) device = create_autospec(cls, instance=True) device.host = MOCK_HOST device.port = MOCK_PORT device.name = MOCK_NAME device.serialnumber = MOCK_SERIAL_NUMBER device.model_name = pywemo_model.replace("LongPress", "") device.udn = f"uuid:{device.model_name}-1_0-{device.serialnumber}" device.firmware_version = MOCK_FIRMWARE_VERSION device.get_state.return_value = 0 # Default to Off device.supports_long_press.return_value = cls.supports_long_press() if issubclass(cls, pywemo.Insight): device.get_standby_state = pywemo.StandbyState.OFF device.current_power_watts = MOCK_INSIGHT_CURRENT_WATTS device.today_kwh = MOCK_INSIGHT_TODAY_KWH device.threshold_power_watts = MOCK_INSIGHT_STATE_THRESHOLD_POWER device.on_for = 1234 device.today_on_time = 5678 device.total_on_time = 9012 if issubclass(cls, pywemo.Maker): device.has_sensor = 1 device.sensor_state = 1 device.switch_mode = 1 device.switch_state = 0 url = f"http://{MOCK_HOST}:{MOCK_PORT}/setup.xml" with patch("pywemo.setup_url_for_address", return_value=url), patch( "pywemo.discovery.device_from_description", return_value=device ): yield device @pytest.fixture(name="pywemo_device")
92582beeff7a5d1e9fa6cfae3afa41f596b5e3c2
@pytest.fixture(name="pywemo_device")
191
https://github.com/home-assistant/core.git
237
def create_pywemo_device(pywemo_registry, pywemo_model): cls = getattr(pywemo, pywemo_model) device = create_autospec(cls, instance=True) device.host = MOCK_HOST device.port = MOCK_PORT device.name = MOCK_NAME device.serialnumber = MOCK_SERIAL_NUMBER device.model_name = pywemo_model.replace("LongPress", "") device.udn = f"uuid:{device.model_name}-1_0-{device.serialnumber}" devic
48
352
create_pywemo_device
19
0
2
10
homeassistant/components/mqtt/vacuum/schema_state.py
290,441
Use `_attr_` for MQTT vacuum (#81534) * Use `_attr_` for MQTT vacuum * Remove unneeded properties * Follow-up comment * Remove default value
core
10
Python
19
schema_state.py
async def async_locate(self, **kwargs): if self.supported_features & VacuumEntityFeature.LOCATE == 0: return await self.async_publish( self._command_topic, self._config[CONF_PAYLOAD_LOCATE], self._config[CONF_QOS], self._config[CONF_RETAIN], self._config[CONF_ENCODING], )
b364ef98a073214aad8deff4ff9b91e9ff041557
59
https://github.com/home-assistant/core.git
113
async def async_locate(self, **kwargs): if self.sup
13
89
async_locate
10
1
1
5
airbyte-integrations/connectors/source-google-analytics-v4/unit_tests/unit_test.py
3,562
reintroduce window in days, log warning when sampling occurs (#9480) * reintroduce window in days, log warning when sampling occurs * Unit tests * Documentation update * Update airbyte-integrations/connectors/source-google-analytics-v4/source_google_analytics_v4/source.py Co-authored-by: Sergei Solonitcyn <11441558+sergei-solonitcyn@users.noreply.github.com> * fix the spec Signed-off-by: Sergei Solonitcyn <sergei.solonitcyn@zazmic.com> * some mypy fixes Signed-off-by: Sergei Solonitcyn <sergei.solonitcyn@zazmic.com> * bump version * format * updated spec and def yaml * Update source.py Co-authored-by: Sergei Solonitcyn <11441558+sergei-solonitcyn@users.noreply.github.com> Co-authored-by: Sergei Solonitcyn <sergei.solonitcyn@zazmic.com> Co-authored-by: auganbay <auganenu@gmail.com>
airbyte
14
Python
10
unit_test.py
def mock_auth_check_connection(requests_mock): yield requests_mock.post( "https://analyticsreporting.googleapis.com/v4/reports:batchGet", json={"data": {"test": "value"}}, ) @pytest.fixture
f78ede0b511de022482c5f0713752ddf01460eb4
@pytest.fixture
25
https://github.com/airbytehq/airbyte.git
28
def mock_auth_check_connection(requests_mock): yield requests_mock.post( "https://analyticsreporting.googleapis.com/v4/reports:batchGet", json={"data": {"test": "value"}}, ) @pytest.fixture
6
54
mock_auth_check_connection
275
0
19
51
rllib/models/utils.py
143,803
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
14
Python
125
utils.py
def get_filter_config(shape): # VizdoomGym (large 480x640). filters_480x640 = [ [16, [24, 32], [14, 18]], [32, [6, 6], 4], [256, [9, 9], 1], ] # VizdoomGym (small 240x320). filters_240x320 = [ [16, [12, 16], [7, 9]], [32, [6, 6], 4], [256, [9, 9], 1], ] # 96x96x3 (e.g. CarRacing-v0). filters_96x96 = [ [16, [8, 8], 4], [32, [4, 4], 2], [256, [11, 11], 2], ] # Atari. filters_84x84 = [ [16, [8, 8], 4], [32, [4, 4], 2], [256, [11, 11], 1], ] # Small (1/2) Atari. filters_42x42 = [ [16, [4, 4], 2], [32, [4, 4], 2], [256, [11, 11], 1], ] # Test image (10x10). filters_10x10 = [ [16, [5, 5], 2], [32, [5, 5], 2], ] shape = list(shape) if len(shape) in [2, 3] and (shape[:2] == [480, 640] or shape[1:] == [480, 640]): return filters_480x640 elif len(shape) in [2, 3] and (shape[:2] == [240, 320] or shape[1:] == [240, 320]): return filters_240x320 elif len(shape) in [2, 3] and (shape[:2] == [96, 96] or shape[1:] == [96, 96]): return filters_96x96 elif len(shape) in [2, 3] and (shape[:2] == [84, 84] or shape[1:] == [84, 84]): return filters_84x84 elif len(shape) in [2, 3] and (shape[:2] == [42, 42] or shape[1:] == [42, 42]): return filters_42x42 elif len(shape) in [2, 3] and (shape[:2] == [10, 10] or shape[1:] == [10, 10]): return filters_10x10 else: raise ValueError( "No default configuration for obs shape {}".format(shape) + ", you must specify `conv_filters` manually as a model option. " "Default configurations are only available for inputs of shape " "[42, 42, K] and [84, 84, K]. You may alternatively want " "to use a custom model or preprocessor." )
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
505
https://github.com/ray-project/ray.git
586
def get_filter_config(shape): # VizdoomGym (large 480x640). filters_480x640 = [ [16, [24, 32], [14, 18]], [32, [6, 6], 4], [256, [9, 9], 1], ] # VizdoomGym (small 240x320). filters_240x320 = [ [16, [12, 16], [7, 9]], [32, [6, 6], 4], [256, [9, 9], 1], ] # 96x96x3 (e.g. CarRacing-v0). filters_96x96 = [ [16, [8, 8], 4], [32, [4, 4], 2], [256, [11, 11], 2], ] # Atari. filters_84x84 = [ [16, [8, 8], 4], [32, [4, 4], 2], [256, [11, 11], 1], ] # Small (1/2) Atari. filters_42x42 = [ [16, [4, 4], 2], [32, [4, 4], 2], [256, [11, 11], 1], ] # Test image (10x10). filters_10x10 = [ [16, [5, 5], 2], [32, [5, 5], 2]
12
704
get_filter_config
12
0
2
2
django/db/backends/oracle/operations.py
205,102
Refs #33476 -- Reformatted code with Black.
django
7
Python
11
operations.py
def convert_empty_string(value, expression, connection): return "" if value is None else value
9c19aff7c7561e3a82978a272ecdaad40dda5c00
17
https://github.com/django/django.git
18
def convert_empty_string(value, expression, connection):
4
26
convert_empty_string
138
0
2
29
test/units/galaxy/test_collection_install.py
268,464
Add --offline option to 'ansible-galaxy collection install' (#78678) * Add --offline option to 'ansible-galaxy collection install' to prevent querying distribution servers This allows installing/upgrading individual tarfiles to have dependency resolution. Previously needed to be done manually with --no-deps or else all collections and dependencies needed to be included in the requirements. Co-authored-by: Sviatoslav Sydorenko <wk.cvs.github@sydorenko.org.ua>
ansible
12
Python
100
test_collection_install.py
def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch, tmp_path_factory): test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input')) concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False) multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm) dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm) matches = RequirementCandidates() mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True) monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches) mock_get_info = MagicMock() mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {}, None, None) monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info) mock_get_versions = MagicMock() mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3'] monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions) mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5'] monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions) cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.2']) requirements = cli._require_one_of_collections_requirements( ['namespace.collection:!=2.0.2'], None, artifacts_manager=concrete_artifact_cm )['collections'] actual = collection._resolve_depenency_map( requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False, False, False)['namespace.collection'] assert actual.namespace == u'namespace' assert actual.name == u'collection' assert actual.src == galaxy_server assert actual.ver == u'2.0.5' # should be ordered latest to earliest assert [c.ver for c in matches.candidates] == [u'2.0.5', u'2.0.4', u'2.0.3', u'2.0.1', u'2.0.0'] assert mock_get_versions.call_count == 1 assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
a02e22e902a69aeb465f16bf03f7f5a91b2cb828
337
https://github.com/ansible/ansible.git
232
def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch, tmp_path_factory): test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input')) concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False) multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm) dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm) matches = RequirementCandidates() mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True) monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_match
51
535
test_build_requirement_from_name_multiple_version_results
19
0
2
10
setup.py
9,809
Implement numpy hack in setup.py to enable install under Poetry (#3363) * Closes #3362: Install issue poetry * get rid of redundant exception handling this code can never raise an exception, so we shouldn't be expecting them Co-authored-by: Michael Penkov <m@penkov.dev>
gensim
14
Python
16
setup.py
def finalize_options(self): build_ext.finalize_options(self) import builtins builtins.__NUMPY_SETUP__ = False import numpy self.include_dirs.append(numpy.get_include()) if need_cython(): import Cython.Build Cython.Build.cythonize(list(make_c_ext(use_cython=True)), language_level=3) Cython.Build.cythonize(list(make_cpp_ext(use_cython=True)), language_level=3)
3331b824d2e3f7a65f5df7903382a0c0a30dcc61
81
https://github.com/RaRe-Technologies/gensim.git
93
def finalize_options(self): build_ext.finalize_options(self) import builtins builtins.__NUMPY_SETUP__ = False import numpy self.include_dirs.append(numpy.get_include()) if need_cython(): import Cython.Build Cython.Build.cythonize(list(make_c_ext(use_cython=True)), language_level=3) Cython
18
132
finalize_options
17
0
1
5
tests/providers/google/cloud/hooks/test_translate.py
44,943
Extract ClientInfo to module level (#21554)
airflow
9
Python
13
test_translate.py
def test_translate_client_creation(self, mock_client, mock_get_creds): result = self.hook.get_conn() mock_client.assert_called_once_with(credentials=mock_get_creds.return_value, client_info=CLIENT_INFO) assert mock_client.return_value == result assert self.hook._client == result
1b568d73e1dfb838a3a0446e3a6063b9f27f04b8
46
https://github.com/apache/airflow.git
44
def test_translate_client_creation(self, mock_client, mock_get_creds): result = self.hook.get_conn() mock_client.assert_called_once_with(credentials=mock_get_creds.r
13
70
test_translate_client_creation
18
0
2
9
erpnext/hr/doctype/training_event/test_training_event.py
66,208
style: format code with black
erpnext
14
Python
16
test_training_event.py
def create_training_program(training_program): if not frappe.db.get_value("Training Program", training_program): frappe.get_doc( { "doctype": "Training Program", "training_program": training_program, "description": training_program, } ).insert()
494bd9ef78313436f0424b918f200dab8fc7c20b
41
https://github.com/frappe/erpnext.git
9
def create_training_program(training_program): if not frappe.db.get_value("Training Program", training_program): frappe.get_doc( { "doctype": "Training Program", "training_program": training_program,
7
73
create_training_program
72
1
6
13
pandas/tests/series/indexing/test_setitem.py
163,209
TST/REF: port coercion tests to SetitemCastingEquivalents (#45209)
pandas
13
Python
59
test_setitem.py
def assert_warns(self, request): # check that we issue a FutureWarning about timezone-matching if request.function.__name__ == "test_slice_key": key = request.getfixturevalue("key") if not isinstance(key, slice): # The test is a no-op, so no warning will be issued yield return exp_dtype = request.getfixturevalue("exp_dtype") val = request.getfixturevalue("val") if exp_dtype == object and isinstance(val, Timestamp) and val.tz is not None: with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"): yield else: yield @pytest.mark.parametrize( "val,exp_dtype", [(Timedelta("12 day"), "timedelta64[ns]"), (1, object), ("x", object)], )
3111ff0317171f03c8ed2e3f7a153872c85d798f
@pytest.mark.parametrize( "val,exp_dtype", [(Timedelta("12 day"), "timedelta64[ns]"), (1, object), ("x", object)], )
86
https://github.com/pandas-dev/pandas.git
217
def assert_warns(self, request): # check that we issue a FutureWarning about timezone-matching if request.function.__name__ == "test_slice_key": key = request.getfixturevalue("key") if not isinstance(key, slice): # The test is a no-op, so no warning will be issued yield return exp_dtype = request.getfixturevalue("exp_dtype") val = request.getfixturevalue("val") if exp_dtype == object and isinstance(val, Timestamp) and val.tz is not None: wit
22
206
assert_warns
27
0
1
14
tests/components/telegram_bot/test_broadcast.py
295,776
Fix telegram broadcast (#69452)
core
13
Python
24
test_broadcast.py
async def test_setup(hass):
    assert await async_setup_component(
        hass,
        "telegram_bot",
        {
            "telegram_bot": {
                "platform": "broadcast",
                "api_key": "1234567890:ABC",
                "allowed_chat_ids": [1],
            }
        },
    )
    await hass.async_block_till_done()

    assert hass.services.has_service("telegram_bot", "send_message") is True
c765e11f55282530275396f8bdc837cb96259920
55
https://github.com/home-assistant/core.git
137
async def test_setup(hass): assert await async_setup_componen
6
103
test_setup
250
1
1
5
jax/_src/numpy/ufuncs.py
121,242
DOC: ensure that _wraps() generates correct links to wrapped functions
jax
15
Python
169
ufuncs.py
def _logical_op(np_op, bitwise_op):
  @_wraps(np_op, update_doc=False, module='numpy')
  @partial(jit, inline=True)
  def op(*args):
    zero = lambda x: lax.full_like(x, shape=(), fill_value=0)
    args = (x if dtypes.issubdtype(dtypes.dtype(x), np.bool_) else lax.ne(x, zero(x)) for x in args)
    return bitwise_op(*_promote_args(np_op.__name__, *args))
  return op


fabs = _one_to_one_unop(np.fabs, lax.abs, True)
bitwise_not = _one_to_one_unop(np.bitwise_not, lax.bitwise_not)
invert = _one_to_one_unop(np.invert, lax.bitwise_not)
negative = _one_to_one_unop(np.negative, lax.neg)
positive = _one_to_one_unop(np.positive, lambda x: x)
floor = _one_to_one_unop(np.floor, lax.floor, True)
ceil = _one_to_one_unop(np.ceil, lax.ceil, True)
exp = _one_to_one_unop(np.exp, lax.exp, True)
log = _one_to_one_unop(np.log, lax.log, True)
expm1 = _one_to_one_unop(np.expm1, lax.expm1, True)
log1p = _one_to_one_unop(np.log1p, lax.log1p, True)
sin = _one_to_one_unop(np.sin, lax.sin, True)
cos = _one_to_one_unop(np.cos, lax.cos, True)
tan = _one_to_one_unop(np.tan, lax.tan, True)
arcsin = _one_to_one_unop(np.arcsin, lax.asin, True)
arccos = _one_to_one_unop(np.arccos, lax.acos, True)
arctan = _one_to_one_unop(np.arctan, lax.atan, True)
sinh = _one_to_one_unop(np.sinh, lax.sinh, True)
cosh = _one_to_one_unop(np.cosh, lax.cosh, True)
arcsinh = _one_to_one_unop(np.arcsinh, lax.asinh, True)
tanh = _one_to_one_unop(np.tanh, lax.tanh, True)
arctanh = _one_to_one_unop(np.arctanh, lax.atanh, True)
sqrt = _one_to_one_unop(np.sqrt, lax.sqrt, True)
cbrt = _one_to_one_unop(np.cbrt, lax.cbrt, True)

add = _maybe_bool_binop(np.add, lax.add, lax.bitwise_or)
bitwise_and = _one_to_one_binop(np.bitwise_and, lax.bitwise_and)
bitwise_or = _one_to_one_binop(np.bitwise_or, lax.bitwise_or)
bitwise_xor = _one_to_one_binop(np.bitwise_xor, lax.bitwise_xor)
left_shift = _one_to_one_binop(np.left_shift, lax.shift_left)
equal = _one_to_one_binop(np.equal, lax.eq)
multiply = _maybe_bool_binop(np.multiply, lax.mul, lax.bitwise_and)
not_equal = _one_to_one_binop(np.not_equal, lax.ne)
subtract = _one_to_one_binop(np.subtract, lax.sub)
arctan2 = _one_to_one_binop(np.arctan2, lax.atan2, True)
minimum = _one_to_one_binop(np.minimum, lax.min)
maximum = _one_to_one_binop(np.maximum, lax.max)
float_power = _one_to_one_binop(np.float_power, lax.pow, True)
nextafter = _one_to_one_binop(np.nextafter, lax.nextafter, True, True)

greater_equal = _comparison_op(np.greater_equal, lax.ge)
greater = _comparison_op(np.greater, lax.gt)
less_equal = _comparison_op(np.less_equal, lax.le)
less = _comparison_op(np.less, lax.lt)

logical_and = _logical_op(np.logical_and, lax.bitwise_and)
logical_not = _logical_op(np.logical_not, lax.bitwise_not)
logical_or = _logical_op(np.logical_or, lax.bitwise_or)
logical_xor = _logical_op(np.logical_xor, lax.bitwise_xor)


@_wraps(np.arccosh, module='numpy')
@jit
9769a0accf77c4e62ff3ace0f71b5c0697b35b54
@_wraps(np.arccosh, module='numpy')
@jit
33
https://github.com/google/jax.git
225
def _logical_op(np_op, bitwise_op): @_wraps(np_op, update_doc=False, module='numpy') @partial(jit, inline=True) def op(*args): zero = lambda x: lax.full_like(x, shape=(), fill_value=0) args = (x if dtypes.issubdtype(dtypes.dtype(x), np.bool_) else lax.ne(x, zero(x)) for x in args) return bitwise_op(*_promote_args(np_op.__name__
95
1,075
_logical_op
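The `_logical_op` helper in the record above derives NumPy-style logical operators from bitwise primitives by first coercing any non-boolean argument to booleans. A minimal, self-contained sketch of that pattern using plain NumPy is below; the helper name `make_logical_op` and the `a != 0` coercion step are illustrative assumptions, not the JAX implementation.

import numpy as np

def make_logical_op(bitwise_op):
    # Coerce each argument to boolean (a != 0), then apply the bitwise op.
    def op(*args):
        arrays = [np.asarray(a) for a in args]
        coerced = [a if a.dtype == np.bool_ else (a != 0) for a in arrays]
        return bitwise_op(*coerced)
    return op

logical_and = make_logical_op(np.bitwise_and)
logical_or = make_logical_op(np.bitwise_or)

print(logical_and(np.array([0, 2, 3]), np.array([1, 0, 5])))  # [False False  True]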
19
0
1
11
saleor/plugins/webhook/tests/subscription_webhooks/payloads.py
27,437
Meta fields added to subscription webhooks event types. (#9759) * Meta fields added to subscription webhooks event types. * Imports adjustments. * Change Event type from Union to Interface. * Rebase fixes. * Review fixes * Handle AnonymousUser as requestor.
saleor
12
Python
17
payloads.py
def generate_app_payload(app, app_global_id):
    return json.dumps(
        {
            "app": {
                "id": app_global_id,
                "isActive": app.is_active,
                "name": app.name,
                "appUrl": app.app_url,
            }
        }
    )
f2ce999fa5865917b8d104d38ef3269eebaf6c06
41
https://github.com/saleor/saleor.git
120
def generate_app_payload(app, app_global_id): return json.dumps( { "app": { "id": app_global_id,
8
69
generate_app_payload
116
1
1
21
dashboard/modules/job/tests/test_job_agent.py
127,687
[Job Submission][refactor 4/N] Complete the remaining interfaces on JobAgent (#28533) Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> just need to implement stop_job, and I remove get_job_info because we can access JobInfoStorage without call `ray.init`.
ray
13
Python
89
test_job_agent.py
async def test_submit_job(job_sdk_client, runtime_env_option, monkeypatch):
    # This flag allows for local testing of runtime env conda functionality
    # without needing a built Ray wheel. Rather than insert the link to the
    # wheel into the conda spec, it links to the current Python site.
    monkeypatch.setenv("RAY_RUNTIME_ENV_LOCAL_DEV_MODE", "1")

    agent_client, head_client = job_sdk_client

    runtime_env = runtime_env_option["runtime_env"]
    runtime_env = upload_working_dir_if_needed(runtime_env, logger=logger)
    runtime_env = upload_py_modules_if_needed(runtime_env, logger=logger)
    runtime_env = RuntimeEnv(**runtime_env_option["runtime_env"]).to_dict()
    request = validate_request_type(
        {"runtime_env": runtime_env, "entrypoint": runtime_env_option["entrypoint"]},
        JobSubmitRequest,
    )

    submit_result = await agent_client.submit_job_internal(request)
    job_id = submit_result.submission_id

    wait_for_condition(
        partial(
            _check_job, client=head_client, job_id=job_id, status=JobStatus.SUCCEEDED
        ),
        timeout=120,
    )

    # There is only one node, so there is no need to replace the client of the JobAgent
    resp = await agent_client.get_job_logs_internal(job_id)
    assert runtime_env_option["expected_logs"] in resp.logs


@pytest.mark.asyncio
8840be1942a69b2595a05c5c5556b0daec7abbcd
@pytest.mark.asyncio
140
https://github.com/ray-project/ray.git
215
async def test_submit_job(job_sdk_client, runtime_env_option, monkeypatch): # This flag allows for local testing of runtime env conda functionality # without needing a built Ray wheel. Rather than insert the link to the # wheel into the conda spec, it links to the current Python site. monkeypatch.setenv("RAY_RUNTIME_ENV_LOCAL_DEV_MODE", "1") agent_client, head_client = job_sdk_client runtime_env = runtime_env_option["runtime_env"] runtime_env = upload_working_dir_if_needed(runtime_env, logger=logger) runtime_env = upload_py_modules_if_needed(runtime_env, logger=logger) runtime_env = RuntimeEnv(**runtime_env_option["runtime_env"]).to_dict() request = validate_request_type( {"runtime_env": runtime_env, "entrypoint": runtime_env_option["entrypoint"]}, JobSubmitRequest, ) submit_result = await agent_client.submit_job_internal(request) job_id = submit_result.submission_id wait_for_condition( partial( _check_job, client=head_client, job_id=job_id, status=JobStatus.SUCCEEDED ), timeout=120, ) # There is only one node, so there is no need to replace the client of the
34
241
test_submit_job
21
0
1
3
networkx/algorithms/traversal/tests/test_dfs.py
176,671
MAINT: Update dfs_test with more comprehensive tests (#5654)
networkx
9
Python
14
test_dfs.py
def test_predecessor(self):
    assert nx.dfs_predecessors(self.G, source=0) == {1: 0, 2: 1, 3: 1, 4: 2}
    assert nx.dfs_predecessors(self.D) == {1: 0, 3: 2}
84aa3823e2904fd63178608373ffbed3096ae0d9
55
https://github.com/networkx/networkx.git
34
def test_predecessor(self): assert nx.dfs_predecessors(self.G, source=0) == {1: 0, 2: 1, 3: 1, 4: 2} assert nx.dfs_predecessors(self
7
80
test_predecessor
9
0
2
3
sympy/combinatorics/graycode.py
196,091
Updated import locations
sympy
13
Python
9
graycode.py
def graycode_subsets(gray_code_set):
    for bitstring in list(GrayCode(len(gray_code_set)).generate_gray()):
        yield get_subset_from_bitstring(gray_code_set, bitstring)
498015021131af4dbb07eb110e5badaba8250c7b
31
https://github.com/sympy/sympy.git
22
def graycode_subsets(gray_code_set): for bitstring in list(GrayCode(len(gray_code_set)).generate_gray()): yield get_subset_from_bitstring(gray_code_set, bitstring)
8
56
graycode_subsets
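The record above enumerates the subsets of a set in Gray-code order, so consecutive subsets differ by exactly one element. A self-contained sketch of the same idea without sympy is below, assuming the standard binary-reflected Gray code `i ^ (i >> 1)` and a low-bit-to-first-element mapping (which need not match sympy's ordering).

def gray_code_subsets(elements):
    # Yield all subsets so that consecutive subsets differ by one element,
    # using the binary-reflected Gray code of each index.
    n = len(elements)
    for i in range(2 ** n):
        gray = i ^ (i >> 1)
        yield [elements[j] for j in range(n) if gray & (1 << j)]

for subset in gray_code_subsets(['a', 'b', 'c']):
    print(subset)  # [], ['a'], ['a', 'b'], ['b'], ['b', 'c'], ...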
34
0
1
7
pandas/tests/plotting/test_series.py
165,017
TST: Clean tests/plotting (#45992)
pandas
11
Python
17
test_series.py
def test_plot_6951(self, ts):
    # GH 6951
    ax = _check_plot_works(ts.plot, subplots=True)
    self._check_axes_shape(ax, axes_num=1, layout=(1, 1))

    ax = _check_plot_works(ts.plot, subplots=True, layout=(-1, 1))
    self._check_axes_shape(ax, axes_num=1, layout=(1, 1))

    ax = _check_plot_works(ts.plot, subplots=True, layout=(1, -1))
    self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
03fef5f0e35200aa5828975b62782bcf11faa0d2
115
https://github.com/pandas-dev/pandas.git
82
def test_plot_6951(self, ts): # GH 6951 ax = _check_plot_works(ts.plot, subplots=True) self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
10
165
test_plot_6951
8
1
1
19
wagtail/admin/ui/sidebar.py
76,493
Sidebar style updates (#8118) Co-authored-by: Thibaud Colas <thibaudcolas@gmail.com>
wagtail
9
Python
8
sidebar.py
def js_args(self):
    return [
        reverse("wagtailadmin_home"),
    ]


@adapter("wagtail.sidebar.SearchModule", base=BaseSidebarAdapter)
229fbf476aded9d5d5a6b32e1ad03025e8cbf392
@adapter("wagtail.sidebar.SearchModule", base=BaseSidebarAdapter)
51
https://github.com/wagtail/wagtail.git
31
def js_args(self): return [ reverse("wagtailadmin_home"), ] @adapter("wagtail.sidebar.SearchModule", base=B
6
39
js_args
28
0
2
6
lib/ansible/cli/doc.py
267,208
ansible-config added json/yaml output to list/dump (#77447) fixes #733644
ansible
13
Python
27
doc.py
def jdump(text):
    try:
        display.display(json_dump(text))
    except TypeError as e:
        display.vvv(traceback.format_exc())
        raise AnsibleError('We could not convert all the documentation into JSON as there was a conversion issue: %s' % to_native(e))
a12e0a0e874c6c0d18a1a2d83dcb106d207136af
41
https://github.com/ansible/ansible.git
54
def jdump(text): try: display.display(json_dump(text)) except TypeError as e: displa
11
72
jdump
79
0
1
31
test/mitmproxy/test_addonmanager.py
252,909
switch to stdlib logging mitmproxy previously used a homegrown logging mechanism based around `mitmproxy.ctx.log` and the `add_log` hook. This worked well for everything we control, but does not work outside the mitmproxy universe. For now we have simply ignored logging in e.g. tornado or h2, but with the upcoming introduction of mitmproxy_wireguard we now have a dependency on some Rust/PyO3 code for which we definitely want logs, but which also cannot easily be changed to use our homegrown logging (PyO3 does the heavy lifting to add interoperability with stdlib logging). Long story short, we want to introduce a log handler for stdlib logging. Now there are two ways how such a handler could operate: 1. We could build a handler that forwards all stdlib log events into our homegrown mechanism. 2. We embrace stdlib's logging as the correct way to do things, and get rid of our homegrown stuff. This PR follows the second approach by removing the `add_log` hook and rewriting the `TermLog` and `EventStore` addons to listen for stdlib log records. This means that all `mitmproxy.ctx.log.info` events are now simply `logging.info` etc. One upside of this approach is that many parts of the codebase now don't depend on the existence of `mitmproxy.ctx` and we can use off-the-shelf things like pytest's `caplog`. We can also now better colorize log output and/or add timestamps.
mitmproxy
12
Python
39
test_addonmanager.py
async def test_simple(caplog):
    with taddons.context(loadcore=False) as tctx:
        a = tctx.master.addons

        assert len(a) == 0
        a.add(TAddon("one"))
        assert a.get("one")
        assert not a.get("two")
        assert len(a) == 1
        a.clear()
        assert len(a) == 0
        assert not a.chain

    with taddons.context(loadcore=False) as tctx:
        a.add(TAddon("one"))

        a.trigger("nonexistent")
        assert "AssertionError" in caplog.text

        f = tflow.tflow()
        a.trigger(hooks.RunningHook())
        a.trigger(HttpResponseHook(f))
        assert "not callable" in caplog.text
        caplog.clear()

        caplog.clear()
        a.get("one").response = addons
        a.trigger(HttpResponseHook(f))
        assert "not callable" not in caplog.text

        a.remove(a.get("one"))
        assert not a.get("one")

        ta = TAddon("one")
        a.add(ta)
        a.trigger(hooks.RunningHook())
        assert ta.running_called

        assert ta in a
c69239bb90c55993326c324908ac78cc2a174e44
229
https://github.com/mitmproxy/mitmproxy.git
280
async def test_simple(caplog): with taddons.context(loadcore=False) as tctx: a = tctx.master.addons assert len(a) == 0 a.add(TAddon("one")) assert a.get("one") assert not a.get("two") assert len(a) == 1 a.clear() assert len(a) == 0 assert not a.chain with taddons.context(loadcore=False) as tctx: a.add(TAddon("one")) a.trigger("nonexistent") assert "AssertionError" in caplog.text f = tflow.tflow()
26
404
test_simple
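The commit message above describes replacing mitmproxy's homegrown log mechanism with handlers for stdlib `logging`. A minimal sketch of a stdlib handler that collects records in memory is shown below; `CollectingHandler` is an illustrative name and is not the actual `TermLog`/`EventStore` implementation.

import logging

class CollectingHandler(logging.Handler):
    # Minimal stdlib handler that keeps formatted records in memory.
    def __init__(self):
        super().__init__()
        self.records = []

    def emit(self, record):
        self.records.append(self.format(record))

handler = CollectingHandler()
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(logging.INFO)
logging.info("hello from stdlib logging")
print(handler.records)  # ['hello from stdlib logging']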
41
0
1
5
test/test_prototype_transforms_functional.py
193,603
cleanup prototype transforms functional tests (#6622) * cleanup prototype transforms functional tests * fix * oust local functions
vision
9
Python
37
test_prototype_transforms_functional.py
def test_scripted_smoke(self, info, args_kwargs, device):
    dispatcher = script(info.dispatcher)

    (image_feature, *other_args), kwargs = args_kwargs.load(device)
    image_simple_tensor = torch.Tensor(image_feature)

    dispatcher(image_simple_tensor, *other_args, **kwargs)


# TODO: We need this until the dispatchers below also have `DispatcherInfo`'s. If they do, `test_scripted_smoke`
# replaces this test for them.
658ca539369c80d8d609bf0e716dc9109186fbf4
52
https://github.com/pytorch/vision.git
75
def test_scripted_smoke(self, info, args_kwargs, device): dispatcher = script(info.dispatcher) (image_feature, *other_args), kwargs = args_kwargs.load(device) image_simple_tensor = torch.Tensor(image_feature) dispatcher(image_simple_tensor, *other_args, **kwargs) # TODO: We need this until the dispatchers below also have `DispatcherInfo`'s. If they do, `test_scripted_smoke` # replaces this te
14
81
test_scripted_smoke
11
0
1
2
awx/main/dispatch/control.py
81,675
Refactor canceling to work through messaging and signals, not database If canceled attempted before, still allow attempting another cancel in this case, attempt to send the sigterm signal again. Keep clicking, you might help! Replace other cancel_callbacks with sigterm watcher adapt special inventory mechanism for this too Get rid of the cancel_watcher method with exception in main thread Handle academic case of sigterm race condition Process cancelation as control signal Fully connect cancel method and run_dispatcher to control Never transition workflows directly to canceled, add logs
awx
11
Python
10
control.py
def cancel(self, task_ids, *args, **kwargs):
    return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)
c59bbdecdbdd920c5d3d298d691129c6bbc94c5e
34
https://github.com/ansible/awx.git
17
def cancel(self, task_ids, *args, **kwargs): return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)
7
53
cancel
12
0
2
5
django/forms/widgets.py
206,034
Refs #33476 -- Reformatted code with Black.
django
10
Python
12
widgets.py
def render_js(self):
    return [
        format_html('<script src="{}"></script>', self.absolute_path(path))
        for path in self._js
    ]
9c19aff7c7561e3a82978a272ecdaad40dda5c00
25
https://github.com/django/django.git
47
def render_js(self): return [ format_html('<script src="{}"></script>', self.absolute_path(path)) for path in self._js ]
6
40
render_js
54
0
4
14
erpnext/accounts/doctype/pos_invoice/pos_invoice.py
69,301
fix: POS properly validate stock for bundle products Stock availability was not calculated properly for Product Bundle with non stock item so i have added logic to properly calculate that as well.
erpnext
11
Python
35
pos_invoice.py
def get_bundle_availability(bundle_item_code, warehouse):
    product_bundle = frappe.get_doc("Product Bundle", bundle_item_code)

    bundle_bin_qty = 1000000
    for item in product_bundle.items:
        item_bin_qty = get_bin_qty(item.item_code, warehouse)
        item_pos_reserved_qty = get_pos_reserved_qty(item.item_code, warehouse)
        available_qty = item_bin_qty - item_pos_reserved_qty

        max_available_bundles = available_qty / item.qty
        if bundle_bin_qty > max_available_bundles and frappe.get_value(
            "Item", item.item_code, "is_stock_item"
        ):
            bundle_bin_qty = max_available_bundles

    pos_sales_qty = get_pos_reserved_qty(bundle_item_code, warehouse)
    return bundle_bin_qty - pos_sales_qty
e392ea1104fee5add5c893c4e092edb6ad21f486
92
https://github.com/frappe/erpnext.git
40
def get_bundle_availability(bundle_item_code, warehouse): product_bundle = frappe.get_doc("Product Bundle", bundle_item_code) bundle_bin_qty = 1000000 for item in product_bundle.items: item_bin_qty = get_bin_qty(item.item_code, warehouse) item_pos_reserved_qty = get_pos_reserved_qty(
19
145
get_bundle_availability
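The record above computes bundle availability as the minimum, over stock components, of (on-hand minus reserved) divided by the per-bundle quantity, less bundles already reserved at the POS. A plain-Python sketch of that arithmetic is below; the dictionary keys are illustrative and not frappe's API.

def bundle_availability(components, reserved_bundles=0):
    # components: dicts with on-hand qty, reserved qty, per-bundle qty,
    # and a flag saying whether the component is a stock item.
    available = float("inf")
    for c in components:
        if not c["is_stock_item"]:
            continue  # non-stock components do not limit availability
        free_qty = c["bin_qty"] - c["reserved_qty"]
        available = min(available, free_qty / c["qty_per_bundle"])
    return available - reserved_bundles

print(bundle_availability([
    {"bin_qty": 10, "reserved_qty": 2, "qty_per_bundle": 2, "is_stock_item": True},
    {"bin_qty": 100, "reserved_qty": 0, "qty_per_bundle": 1, "is_stock_item": False},
], reserved_bundles=1))  # 3.0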
18
0
2
6
src/sentry/runner/commands/sendmail.py
92,867
feat(runner): Add sendmail command to send emails from sentry command (#36732)
sentry
13
Python
18
sendmail.py
def sendmail(files, fail_silently):
    from sentry.runner import configure

    configure()

    for file in files:
        with open(file) as f:
            send_prepared_email(f.read(), fail_silently=fail_silently)
b72ff60d06f2cd1dae8390bebf71d191ae341faf
42
https://github.com/getsentry/sentry.git
48
def sendmail(files, fail_silently): fr
11
72
sendmail
20
1
1
3
modin/pandas/test/test_series.py
154,811
TEST-#5040: Rework test_series using eval_general() (#5041) Signed-off-by: Vasily Litvinov <fam1ly.n4me@yandex.ru>
modin
11
Python
20
test_series.py
def test_str_get(data, i):
    modin_series, pandas_series = create_test_series(data)
    eval_general(modin_series, pandas_series, lambda series: series.str.get(i))


@pytest.mark.parametrize(
    "data", test_string_list_data_values, ids=test_string_list_data_keys
)
@pytest.mark.parametrize("sep", string_sep_values, ids=string_sep_keys)
d86dda5094eba47840f42a21cf4b2c953e698960
@pytest.mark.parametrize(
    "data", test_string_list_data_values, ids=test_string_list_data_keys
)
@pytest.mark.parametrize("sep", string_sep_values, ids=string_sep_keys)
33
https://github.com/modin-project/modin.git
25
def test_str_get(data, i): modin_series, pandas_series = create_test_series(data) eval_general(modin_series, pandas_series, lambda series: series.str.get(i)) @pytest.mark.parametrize( "data", test_string_list_data_values, ids=test_string_list_data_keys ) @pytest.mark.parametrize("sep", string_sep_values, ids=string_sep_keys)
18
98
test_str_get
216
0
7
40
mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py
114,202
return columns list for files
mindsdb
14
Python
85
information_schema.py
def _get_columns(self):
    columns = self.information_schema['COLUMNS']

    # NOTE there is a lot of types in mysql, but listed below should be enough for our purposes
    row_templates = {
        'text': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', None, 'YES', 'varchar', 1024, 3072, None, None, None, 'utf8', 'utf8_bin', 'varchar(1024)', None, None, 'select', None, None],
        'timestamp': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', 'CURRENT_TIMESTAMP', 'YES', 'timestamp', None, None, None, None, 0, None, None, 'timestamp', None, None, 'select', None, None],
        'bigint': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', None, 'YES', 'bigint', None, None, 20, 0, None, None, None, 'bigint unsigned', None, None, 'select', None, None],
        'float': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', None, 'YES', 'float', None, None, 12, 0, None, None, None, 'float', None, None, 'select', None, None]
    }

    result = []

    for table_name in self.information_schema:
        table_columns = self.information_schema[table_name]
        for i, column_name in enumerate(table_columns):
            result_row = row_templates['text'].copy()
            result_row[1] = 'information_schema'
            result_row[2] = table_name
            result_row[3] = column_name
            result_row[4] = i
            result.append(result_row)

    mindsb_dn = self.get('MINDSDB')
    for table_name in mindsb_dn.get_tables():
        table_columns = mindsb_dn.get_table_columns(table_name)
        for i, column_name in enumerate(table_columns):
            result_row = row_templates['text'].copy()
            result_row[1] = 'mindsdb'
            result_row[2] = table_name
            result_row[3] = column_name
            result_row[4] = i
            result.append(result_row)

    mindsb_dn = self.get('FILES')
    for table_name in mindsb_dn.get_tables():
        table_columns = mindsb_dn.get_table_columns(table_name)
        for i, column_name in enumerate(table_columns):
            result_row = row_templates['text'].copy()
            result_row[1] = 'files'
            result_row[2] = table_name
            result_row[3] = column_name
            result_row[4] = i
            result.append(result_row)

    df = pd.DataFrame(result, columns=columns)

    return df
284fb890eca785bec894e69c6d5b122e83ef93d0
433
https://github.com/mindsdb/mindsdb.git
679
def _get_columns(self): columns = self.information_schema['COLUMNS'] # NOTE there is a lot of types in mysql, but listed below should be enough for our purposes row_templates = { 'text': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', None, 'YES', 'varchar', 1024, 3072, None, None, None, 'utf8', 'utf8_bin', 'varchar(1024)', None, None, 'select', None, None], 'timestamp': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', 'CURRENT_TIMESTAMP', 'YES', 'timestamp', None, None, None, None, 0, None, None, 'timestamp', None, None, 'select', None, None], 'bigint': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', None, 'YES', 'bigint', None, None, 20, 0, None, None, None, 'bigint unsigned', None, None, 'select', None, None], 'float': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', None, 'YES', 'float', None, None, 12, 0, None, None, None, 'float', None, None, 'select', None, None] } result = [] for table_name in self.information_schema: table_columns = self.information_schema[table_name] for i, column_name in enumerate(table_columns): result_row = row_templates['text'].copy() result_row[1] = 'information_schema' result_row[2] = table_name result_row[3] = column_name result_row[4] = i result.append(result_row) mindsb_dn = self.get('MINDSDB') for table_name in mindsb_dn.get_tables(): table_columns = mindsb_dn.get_table_columns(table_name) for i, column_name in enumerate(table_columns): result_row = row_templates['text'].copy() resu
21
685
_get_columns
9
0
2
7
wagtail/core/migrations/0027_fix_collection_path_collation.py
73,767
Reformat with black
wagtail
10
Python
9
0027_fix_collection_path_collation.py
def set_collection_path_collation(apps, schema_editor):
    if schema_editor.connection.vendor == "postgresql":
        schema_editor.execute(
        )
d10f15e55806c6944827d801cd9c2d53f5da4186
23
https://github.com/wagtail/wagtail.git
41
def set_collection_path_collation(apps, schema_editor): if schema_editor.connection.vendor == "postgresql": schema_editor.execute( )
6
43
set_collection_path_collation
102
1
1
22
sklearn/manifold/tests/test_isomap.py
261,683
TST use global_dtype in sklearn/manifold/tests/test_isomap.py (#22673) Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com> Co-authored-by: Jérémie du Boisberranger <jeremiedbb@users.noreply.github.com>
scikit-learn
15
Python
70
test_isomap.py
def test_pipeline_with_nearest_neighbors_transformer(global_dtype):
    # Test chaining NearestNeighborsTransformer and Isomap with
    # neighbors_algorithm='precomputed'
    algorithm = "auto"
    n_neighbors = 10

    X, _ = datasets.make_blobs(random_state=0)
    X2, _ = datasets.make_blobs(random_state=1)
    X = X.astype(global_dtype, copy=False)
    X2 = X2.astype(global_dtype, copy=False)

    # compare the chained version and the compact version
    est_chain = pipeline.make_pipeline(
        neighbors.KNeighborsTransformer(
            n_neighbors=n_neighbors, algorithm=algorithm, mode="distance"
        ),
        manifold.Isomap(n_neighbors=n_neighbors, metric="precomputed"),
    )
    est_compact = manifold.Isomap(
        n_neighbors=n_neighbors, neighbors_algorithm=algorithm
    )

    Xt_chain = est_chain.fit_transform(X)
    Xt_compact = est_compact.fit_transform(X)
    assert_allclose(Xt_chain, Xt_compact)

    Xt_chain = est_chain.transform(X2)
    Xt_compact = est_compact.transform(X2)
    assert_allclose(Xt_chain, Xt_compact)


@pytest.mark.parametrize(
    "metric, p, is_euclidean",
    [
        ("euclidean", 2, True),
        ("manhattan", 1, False),
        ("minkowski", 1, False),
        ("minkowski", 2, True),
        (lambda x1, x2: np.sqrt(np.sum(x1**2 + x2**2)), 2, False),
    ],
)
35b826a49c66bc6cb03dc751784483806aba6400
@pytest.mark.parametrize(
    "metric, p, is_euclidean",
    [
        ("euclidean", 2, True),
        ("manhattan", 1, False),
        ("minkowski", 1, False),
        ("minkowski", 2, True),
        (lambda x1, x2: np.sqrt(np.sum(x1**2 + x2**2)), 2, False),
    ],
)
154
https://github.com/scikit-learn/scikit-learn.git
239
def test_pipeline_with_nearest_neighbors_transformer(global_dtype): # Test chaining NearestNeighborsTransformer and Isomap with # neighbors_algorithm='precomputed' algorithm = "auto" n_neighbors = 10 X, _ = datasets.make_blobs(random_state=0) X2, _ = datasets.make_blobs(random_state=1) X = X.astype(global_dtype, copy=False) X2 = X2.astype(global_dtype, copy=False) # compare the chained version and the compact version est_chain = pipeline.make_pipeline(
36
353
test_pipeline_with_nearest_neighbors_transformer
103
0
3
22
ppocr/data/imaug/fce_aug.py
23,174
add fcenet
PaddleOCR
15
Python
66
fce_aug.py
def generate_crop_target(self, image, all_polys, pad_h, pad_w):
    h, w, _ = image.shape
    h_array = np.zeros((h + pad_h * 2), dtype=np.int32)
    w_array = np.zeros((w + pad_w * 2), dtype=np.int32)

    text_polys = []
    for polygon in all_polys:
        rect = cv2.minAreaRect(polygon.astype(np.int32).reshape(-1, 2))
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        text_polys.append([box[0], box[1], box[2], box[3]])

    polys = np.array(text_polys, dtype=np.int32)
    for poly in polys:
        poly = np.round(poly, decimals=0).astype(np.int32)
        minx = np.min(poly[:, 0])
        maxx = np.max(poly[:, 0])
        w_array[minx + pad_w:maxx + pad_w] = 1
        miny = np.min(poly[:, 1])
        maxy = np.max(poly[:, 1])
        h_array[miny + pad_h:maxy + pad_h] = 1

    h_axis = np.where(h_array == 0)[0]
    w_axis = np.where(w_array == 0)[0]
    return h_axis, w_axis
9f62b610dea6161627200ed85d92e19b1923279a
281
https://github.com/PaddlePaddle/PaddleOCR.git
301
def generate_crop_target(self, image, all_polys, pad_h, pad_w): h, w, _ = image.shape h_array = np.zeros((h + pad_h * 2), dtype=np.int32) w_array = np.zeros((w + pad_w * 2), dtype=np.int32) text_polys = [] for polygon in all_polys: rect = cv2.minAreaRect(polygon.astype(np.int32).reshape(-1, 2)) box = cv2.boxPoints(rect) box = np.int0(box) text_polys.append([box[0], box[1], box[2], box[3]]) polys = np.array(text_polys, dtype=np.int32) for poly in polys: poly = np.round(poly, decimals=0).astype(np.int32) minx = np.min(poly[:, 0]) maxx = np.max(poly[:, 0]) w_array[minx + pad_w:maxx + pad_w] = 1 miny = np.min(po
41
424
generate_crop_target
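The record above finds rows and columns that are free of text regions, so crops can be taken along them without cutting through text. A NumPy-only sketch of the same bookkeeping is below, assuming axis-aligned boxes instead of `cv2.minAreaRect` output.

import numpy as np

def free_crop_axes(img_h, img_w, boxes, pad_h, pad_w):
    # boxes: iterable of (x_min, y_min, x_max, y_max) text regions.
    # Mark padded rows/columns covered by any box, then return the
    # indices that are still free and therefore safe to crop along.
    h_array = np.zeros(img_h + 2 * pad_h, dtype=np.int32)
    w_array = np.zeros(img_w + 2 * pad_w, dtype=np.int32)
    for x_min, y_min, x_max, y_max in boxes:
        w_array[x_min + pad_w:x_max + pad_w] = 1
        h_array[y_min + pad_h:y_max + pad_h] = 1
    return np.where(h_array == 0)[0], np.where(w_array == 0)[0]

h_axis, w_axis = free_crop_axes(8, 8, [(2, 3, 5, 6)], pad_h=1, pad_w=1)
print(h_axis, w_axis)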
67
0
2
26
netbox/dcim/views.py
264,595
Fixes #8935: Correct ordering of next/previous racks to use naturalized names
netbox
14
Python
57
views.py
def get_extra_context(self, request, instance):
    # Get 0U devices located within the rack
    nonracked_devices = Device.objects.filter(
        rack=instance,
        position__isnull=True,
        parent_bay__isnull=True
    ).prefetch_related('device_type__manufacturer')

    peer_racks = Rack.objects.restrict(request.user, 'view').filter(site=instance.site)
    if instance.location:
        peer_racks = peer_racks.filter(location=instance.location)
    else:
        peer_racks = peer_racks.filter(location__isnull=True)
    next_rack = peer_racks.filter(_name__gt=instance._name).first()
    prev_rack = peer_racks.filter(_name__lt=instance._name).reverse().first()

    reservations = RackReservation.objects.restrict(request.user, 'view').filter(rack=instance)
    power_feeds = PowerFeed.objects.restrict(request.user, 'view').filter(rack=instance).prefetch_related(
        'power_panel'
    )

    device_count = Device.objects.restrict(request.user, 'view').filter(rack=instance).count()

    return {
        'device_count': device_count,
        'reservations': reservations,
        'power_feeds': power_feeds,
        'nonracked_devices': nonracked_devices,
        'next_rack': next_rack,
        'prev_rack': prev_rack,
    }
197dfca5b2d181369b90e40704ac9188d149a688
221
https://github.com/netbox-community/netbox.git
296
def get_extra_context(self, request, instance): # Get 0U devices located within the rack nonracked_devices = Device.objects.filter( rack=instance, position__isnull=True, parent_bay__isnull=True ).prefetch_related('device_type__manufacturer') peer_racks = Rack.objects.restrict(request.user, 'view').filter(site=instance.site) if instance.location: peer_racks = peer_racks.filter(location=instance.location) else: peer_racks = peer_racks.filter(location__isnull
32
362
get_extra_context
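The record above selects the next and previous rack among peers ordered by naturalized name using `_name__gt`/`_name__lt` filters. A plain-Python analogue of that lookup, with no Django ORM and plain string ordering standing in for naturalized names, looks like this.

def neighbours(names, current):
    # Among peers sorted by name, pick the first name greater than and the
    # last name smaller than the current one (next/previous item).
    ordered = sorted(names)
    next_item = next((n for n in ordered if n > current), None)
    prev_item = next((n for n in reversed(ordered) if n < current), None)
    return prev_item, next_item

print(neighbours(["rack-01", "rack-02", "rack-10"], "rack-02"))
# ('rack-01', 'rack-10')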
35
1
1
21
tests/integration_tests/test_ray.py
8,608
int: Refactor `test_ray.py` to limit number of full train jobs (#2637) * refactors majority of ray tests to compare preprocessing results * reverted changes to concatenate_datasets; use fixed split config * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added partial modin fix; fixed reshape guard Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Justin Zhao <justinxzhao@gmail.com>
ludwig
13
Python
29
test_ray.py
def test_ray_tabular(tmpdir, df_engine, ray_cluster_2cpu):
    input_features = [
        category_feature(encoder={"vocab_size": 2}, reduce_input="sum"),
        number_feature(normalization="zscore"),
        set_feature(),
        binary_feature(),
        bag_feature(),
        h3_feature(),
        date_feature(),
    ]
    output_features = [
        binary_feature(bool2str=["No", "Yes"]),
        binary_feature(),
        number_feature(normalization="zscore"),
    ]
    run_preprocessing(
        tmpdir,
        df_engine,
        input_features,
        output_features,
    )


@pytest.mark.parametrize("dataset_type", ["csv", "parquet"])
@pytest.mark.distributed
0b33b3f6524ecbf3671b2c911efe429feb93988f
@pytest.mark.parametrize("dataset_type", ["csv", "parquet"])
@pytest.mark.distributed
92
https://github.com/ludwig-ai/ludwig.git
148
def test_ray_tabular(tmpdir, df_engine, ray_cluster_2cpu): input_features = [ category_feature(encoder={"vocab_size": 2}, reduce_input="sum"), number_feature(normalization="zscore"), set_feature(), binary_feature(), bag_feature(), h3_feature(), date_feature(), ] output_features = [ binary_
22
182
test_ray_tabular
103
0
7
41
src/paperless_tika/parsers.py
319,393
improve pdf generation
paperless-ngx
15
Python
65
parsers.py
def extract_metadata(self, document_path, mime_type):
    result = []
    prefix_pattern = re.compile(r"(.*):(.*)")
    try:
        parsed = self.get_tika_result(document_path)
    except ParseError as e:
        self.log(
            "warning",
            f"Error while fetching document metadata for "
            f"{document_path}: {e}",
        )
        return result

    for key, value in parsed["metadata"].items():
        if isinstance(value, list):
            value = ", ".join([str(e) for e in value])
        value = str(value)
        try:
            m = prefix_pattern.match(key)
            result.append(
                {
                    "namespace": "",
                    "prefix": m.group(1),
                    "key": m.group(2),
                    "value": value,
                },
            )
        except AttributeError:
            result.append(
                {
                    "namespace": "",
                    "prefix": "",
                    "key": key,
                    "value": value,
                },
            )
        except Exception as e:
            self.log(
                "warning",
                f"Error while reading metadata {key}: {value}. Error: "
                f"{e}",
            )
    result.sort(key=lambda item: (item["prefix"], item["key"]))
    return result
c8081595c4450780eade4921a81d0b1bd08105cc
207
https://github.com/paperless-ngx/paperless-ngx.git
702
def extract_metadata(self, document_path, mime_type): result = [] prefix_pattern = re.compile(r"(.*):(.*)") try: parsed = self.get_tika_result(document_path) except ParseError as e: self.log( "warning", f"Error while fetching document metadata for " f"{document_path}: {e}", ) return result for key, value in parsed["metadata"].items(): if isinstance(value, list): value = ", ".join([str(e) for e in value]) value = str(value) try: m = prefix
28
372
extract_metadata
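The record above splits Tika metadata keys of the form `prefix:key` with a regular expression and falls back to an empty prefix when the pattern does not match. A small standalone sketch of that parsing:

import re

def split_metadata_key(key):
    # Split "prefix:name" metadata keys; keys without a colon are returned
    # with an empty prefix, mirroring the fallback branch above.
    m = re.compile(r"(.*):(.*)").match(key)
    if m is None:
        return "", key
    return m.group(1), m.group(2)

print(split_metadata_key("dc:title"))  # ('dc', 'title')
print(split_metadata_key("pages"))     # ('', 'pages')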
42
0
5
13
lib/matplotlib/axis.py
110,246
DOC: improve grammar and consistency
matplotlib
10
Python
27
axis.py
def update_units(self, data):
    converter = munits.registry.get_converter(data)
    if converter is None:
        return False

    neednew = self.converter != converter
    self.converter = converter
    default = self.converter.default_units(data, self)
    if default is not None and self.units is None:
        self.set_units(default)

    elif neednew:
        self._update_axisinfo()
    self.stale = True
    return True
9b6abd0b4933811e0a45c2535ab8fd107db65dd9
82
https://github.com/matplotlib/matplotlib.git
145
def update_units(self, data): converter = munits.registry.get_converter(data) if converter is None:
14
132
update_units
13
0
1
11
wagtail/snippets/views/snippets.py
79,882
Fix missing preview_url_name in SnippetViewSet.revisions_revert_view (#9663)
wagtail
11
Python
13
snippets.py
def revisions_revert_view(self):
    return self.revisions_revert_view_class.as_view(
        model=self.model,
        permission_policy=self.permission_policy,
        index_url_name=self.get_url_name("list"),
        edit_url_name=self.get_url_name("edit"),
        delete_url_name=self.get_url_name("delete"),
        history_url_name=self.get_url_name("history"),
        preview_url_name=self.get_url_name("preview_on_edit"),
        revisions_revert_url_name=self.get_url_name("revisions_revert"),
    )
baddbfad424afa797287151e6b04d9c0459df3ef
79
https://github.com/wagtail/wagtail.git
114
def revisions_revert_view(self): return self.revisions_revert_view_class.as_view( model=self.model, permission_policy=self.permission_policy, index_url_name=self.get_url_name("list"), edit_url_name=self.get_url_name("edit"), delete_url_name=self.get_url_name("delete"), history_url_name=self.get_url_name("history"), preview_url_name=self.get_url_name("preview_on_edit"), revisions_revert_u
13
128
revisions_revert_view