Dataset columns, as exposed by the dataset viewer. For `int64` columns the min/max are value ranges; for string columns they are string lengths in characters; `language` is a single-class string column (`Python` only).

| Column | Dtype | Min | Max |
| --- | --- | --- | --- |
| n_words | int64 | 3 | 1.95k |
| n_ast_errors | int64 | 0 | 2 |
| complexity | int64 | 1 | 151 |
| nloc | int64 | 2 | 546 |
| path | string | 8 | 125 |
| id | int64 | 280 | 339k |
| commit_message | string | 3 | 18.1k |
| repo | string | 3 | 28 |
| ast_levels | int64 | 4 | 28 |
| language | string (1 class) | – | – |
| vocab_size | int64 | 3 | 677 |
| file_name | string | 5 | 67 |
| code | string | 101 | 24k |
| commit_id | string | 40 | 40 |
| ast_errors | string | 0 | 2.76k |
| token_counts | int64 | 7 | 3.77k |
| url | string | 31 | 61 |
| n_whitespaces | int64 | 4 | 13.9k |
| random_cut | string | 21 | 13.9k |
| n_identifiers | int64 | 1 | 157 |
| n_ast_nodes | int64 | 10 | 3.6k |
| fun_name | string | 3 | 72 |

The sample records that follow list one field per line in this same column order (a row's empty `ast_errors` cell produces no line).
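For orientation, here is a minimal sketch of how a dataset with this schema could be loaded and filtered with the `datasets` library. The Hub identifier `user/python-commit-functions` is a hypothetical placeholder (the real dataset ID is not stated here); the column names are taken from the table above.

```python
# Minimal sketch (hypothetical dataset ID): load the dataset and look at a few
# small, low-complexity functions using the columns listed above.
from datasets import load_dataset

# "user/python-commit-functions" is a placeholder, not the real Hub ID.
ds = load_dataset("user/python-commit-functions", split="train")

# Keep rows describing short, simple functions.
simple = ds.filter(lambda row: row["complexity"] <= 5 and row["nloc"] <= 20)

# Print repo, file path, function name, and the start of the source code.
for row in simple.select(range(3)):
    print(row["repo"], row["path"], row["fun_name"])
    print(row["code"][:200])
```

Each record pairs the function source (`code`) and a truncated variant (`random_cut`) with commit metadata (`commit_id`, `commit_message`, `url`) and size/complexity statistics such as `nloc`, `token_counts`, and `n_ast_nodes`.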
25
0
1
7
dask/dataframe/tests/test_shuffle.py
155,889
Add some extra test coverage (#8302) Adds a test for an option to `sort_values` that wasn't previously tested, as well as a few other tests for lines in `numeric.py` and `shuffle.py` that weren't tested.
dask
11
Python
24
test_shuffle.py
def test_shuffle_values_raises(): df = pd.DataFrame({"a": [1, 3, 2]}) ddf = dd.from_pandas(df, npartitions=3) with pytest.raises( ValueError, match="na_position must be either 'first' or 'last'" ): ddf.sort_values(by="a", na_position="invalid")
0e4ddc4772d5e48858d6627979f1fddefc7f1cb1
58
https://github.com/dask/dask.git
50
def test_shuffle_values_raises(): df = pd.DataFrame({"a": [1, 3, 2]}) ddf = dd.from_pandas(df, npartitions=3) with pytest.raises( ValueError, match="na_position must be either 'first' or 'last'" ): ddf.sort_values(b
15
98
test_shuffle_values_raises
99
1
13
28
erpnext/accounts/doctype/pricing_rule/pricing_rule.py
64,047
fix: ignore pricing rule in all transactions
erpnext
17
Python
68
pricing_rule.py
def remove_pricing_rule_for_item(pricing_rules, item_details, item_code=None): from erpnext.accounts.doctype.pricing_rule.utils import ( get_applied_pricing_rules, get_pricing_rule_items, ) for d in get_applied_pricing_rules(pricing_rules): if not d or not frappe.db.exists("Pricing Rule", d): continue pricing_rule = frappe.get_cached_doc('Pricing Rule', d) if pricing_rule.price_or_product_discount == 'Price': if pricing_rule.rate_or_discount == 'Discount Percentage': item_details.discount_percentage = 0.0 item_details.discount_amount = 0.0 item_details.rate = item_details.get('price_list_rate', 0) if pricing_rule.rate_or_discount == 'Discount Amount': item_details.discount_amount = 0.0 if pricing_rule.margin_type in ['Percentage', 'Amount']: item_details.margin_rate_or_amount = 0.0 item_details.margin_type = None elif pricing_rule.get('free_item'): item_details.remove_free_item = (item_code if pricing_rule.get('same_item') else pricing_rule.get('free_item')) if pricing_rule.get("mixed_conditions") or pricing_rule.get("apply_rule_on_other"): items = get_pricing_rule_items(pricing_rule) item_details.apply_on = (frappe.scrub(pricing_rule.apply_rule_on_other) if pricing_rule.apply_rule_on_other else frappe.scrub(pricing_rule.get('apply_on'))) item_details.applied_on_items = ','.join(items) item_details.pricing_rules = '' return item_details @frappe.whitelist()
f6dda738dc99060090e703b21f7a77692887605b
@frappe.whitelist()
237
https://github.com/frappe/erpnext.git
70
def remove_pricing_rule_for_item(pricing_rules, item_details, item_code=None): from erpnext.accounts.doctype.pricing_rule.utils import ( get_applied_pricing_rules, get_pricing_rule_items, ) for d in get_applied_pricing_rules(pricing_rules): if not d or not frappe.db.exists("Pricing Ru
32
393
remove_pricing_rule_for_item
40
0
7
14
django/utils/regex_helper.py
206,724
Refs #33476 -- Reformatted code with Black.
django
12
Python
26
regex_helper.py
def walk_to_end(ch, input_iter): if ch == "(": nesting = 1 else: nesting = 0 for ch, escaped in input_iter: if escaped: continue elif ch == "(": nesting += 1 elif ch == ")": if not nesting: return nesting -= 1
9c19aff7c7561e3a82978a272ecdaad40dda5c00
53
https://github.com/django/django.git
146
def walk_to_end(ch, input_iter): if ch == "(": nesting = 1 else: nesting = 0 for ch, escaped in input_iter: if escaped: continue e
5
95
walk_to_end
55
0
1
23
test/test_bar.py
152,851
Update many unit test codes.(Coverage Up to 99%)
pyecharts
21
Python
50
test_bar.py
def test_bar_add_dataset(fake_writer): c = ( Bar() .add_dataset( source=[ ["product", "2015", "2016", "2017"], ["Matcha Latte", 43.3, 85.8, 93.7], ["Milk Tea", 83.1, 73.4, 55.1], ["Cheese Cocoa", 86.4, 65.2, 82.5], ["Walnut Brownie", 72.4, 53.9, 39.1], ] ) .add_yaxis(series_name="2015", y_axis=[]) .add_yaxis(series_name="2016", y_axis=[]) .add_yaxis(series_name="2017", y_axis=[]) .set_global_opts( title_opts=opts.TitleOpts(title="Dataset simple bar example"), xaxis_opts=opts.AxisOpts(type_="category"), ) ) c.render() _, content = fake_writer.call_args[0] assert_in("dataset", content)
1d5330b1559fe8033556d6b27970e4b14fa3b253
177
https://github.com/pyecharts/pyecharts.git
244
def test_bar_add_dataset(fake_writer): c = ( Bar() .add_dataset( source=[ ["product", "2015", "2016", "2017"],
22
243
test_bar_add_dataset
26
0
2
9
plugins/train/model/_base/model.py
100,813
Refactoring and TravisCI to Github Actions (#1239) * refactor training * travis to actions
faceswap
13
Python
23
model.py
def config(self) -> dict: global _CONFIG # pylint: disable=global-statement if not _CONFIG: model_name = self._config_section logger.debug("Loading config for: %s", model_name) _CONFIG = Config(model_name, configfile=self._configfile).config_dict return _CONFIG
ff6b0209dd5ad57b81b0aca570df7f39a7119bfb
43
https://github.com/deepfakes/faceswap.git
88
def config(self) -> dict: global _CONFIG # pylint: disable=global-statement if not _CONFIG: model_name = self._config_section logger.debug(
12
73
config
72
0
2
8
lib/mpl_toolkits/mplot3d/axes3d.py
109,743
Add pan and zoom toolbar handling to 3D Axes (Replaces PR#22614) (#23449) * ENH: Add pan and zoom toolbar handling to 3D Axes 1) This moves the pan logic that was already in the mouse move handler into the "drag_pan" method to make it available from the toolbar. 2) This expands upon the panning logic to enable a zoom-to-box feature. The zoom-to-box is done relative to the Axes, so it shrinks/expands the box as a fraction of each delta, from lower-left Axes to lower-left zoom-box. Thus, it tries to handle non-centered zooms, which adds more cases to handle versus the current right-click zoom only scaling from the center of the projection. * Rewrite zooming with bounding box * Rewrite 3d panning to work with a roll angle * Whats new for zoom and pan buttons * Make pan button configurable * Do not jump when zooming and mouse goes over other subplot * Rework zooming for 3d plots * Handle x/y lock when zooming and panning * Update tests * Docstrings * Dont assume a scale_z * Limit zoom box * Test zoom pan key modifiers * Save some calculation by saving view axes * Deprecation warnings for Axes3D.eye, .vvec * Remove Axes3D._prepare_view_from_bbox for now * Comments and docstrings * Switch from uvn to uvw * Save aspect to axes * Constrain zooming with mouse when one of the equal aspect ratios is set * Cleanup * Cleanup * Consolidate finding equal aspect axis indices * linting * More intuitive scaling * Box zoom keeps existing aspect ratios * Linting * Code review comments * Revert parameters for view_transformation * Fix new 3d pan/zoom view going on view stack twice * Better clipping * Test 3d toolbar navigation * Privatize helper functions * Deprecations * Code review changes * Deprecation note * Undeprecate proj3d.view_transformation * Undeprecate proj3d.view_transformation * Update doc/api/next_api_changes/deprecations/23449-SS.rst Co-authored-by: Greg Lucas <greg.m.lucas@gmail.com> Co-authored-by: Scott Shambaugh <scottshambaugh@users.noreply.github.com> Co-authored-by: Oscar Gustafsson <oscar.gustafsson@gmail.com>
matplotlib
11
Python
58
axes3d.py
def _calc_view_axes(self, eye): elev_rad = np.deg2rad(art3d._norm_angle(self.elev)) roll_rad = np.deg2rad(art3d._norm_angle(self.roll)) # Look into the middle of the world coordinates R = 0.5 * self._roll_to_vertical(self._box_aspect) # Define which axis should be vertical. A negative value # indicates the plot is upside down and therefore the values # have been reversed: V = np.zeros(3) V[self._vertical_axis] = -1 if abs(elev_rad) > np.pi/2 else 1 u, v, w = proj3d._view_axes(eye, R, V, roll_rad) return u, v, w
4896ec1a2cfb8c454e385632d8df213c915ced52
106
https://github.com/matplotlib/matplotlib.git
156
def _calc_view_axes(self, eye): elev_rad =
24
164
_calc_view_axes
21
0
1
13
mkdocs/tests/cli_tests.py
224,298
Format code with `black -l100 --skip-string-normalization`
mkdocs
10
Python
21
cli_tests.py
def test_serve_dirtyreload(self, mock_serve): result = self.runner.invoke(cli.cli, ["serve", '--dirtyreload'], catch_exceptions=False) self.assertEqual(result.exit_code, 0) mock_serve.assert_called_once_with( dev_addr=None, livereload='dirty', config_file=None, strict=None, theme=None, use_directory_urls=None, watch_theme=False, watch=(), )
dca7cbb43fcd6ea7c677c98ba585395b070d387b
77
https://github.com/mkdocs/mkdocs.git
136
def test_serve_dirtyreload(self, mock_serve): result = self.runner.invoke(cli.cli, ["serve", '--dirtyreload'], catch_exceptions=False) self.assertEqual(result.exit_code, 0) mock_serve.assert_called_once_with( dev_addr=None, livereload='dirty',
19
114
test_serve_dirtyreload
132
0
6
23
misc/tools/postprocess-vf2.py
162,942
Remove slnt/ital VF axis This removes the slant/italic variable axis and breaks up the font in two: roman and italic. This change will allow diverging designs for italic (for example single-storey a). It also addresses the fact that most software, including web browsers, doesn't handle VFs with slnt or ital well.
inter
10
Python
59
postprocess-vf2.py
def build_opsz_axis_values(ttfont): nametable = ttfont['name'] instances = ttfont['fvar'].instances val_min = 0.0 val_max = 0.0 for instance in instances: opsz_val = instance.coordinates["opsz"] if val_min == 0.0 or opsz_val < val_min: val_min = opsz_val if val_max == 0.0 or opsz_val > val_max: val_max = opsz_val return [ { "name": "Regular", "value": val_min, "linkedValue": val_max, "flags": 2, }, { "name": "Display", "value": val_max, }, ] # results = [] # for instance in instances: # opsz_val = instance.coordinates["opsz"] # name = nametable.getName(instance.subfamilyNameID, 3, 1, 1033).toUnicode() # name = name.replace("Italic", "").strip() # if name == "": # name = "Regular" # inst = { # "name": name, # "value": opsz_val, # } # if int(opsz_val) == val_min: # inst["flags"] = 0 # inst["linkedValue"] = val_max # else: # inst["linkedValue"] = val_min # results.append(inst) # return results
3f174fcef6b614ee58716b7ec1b2744e137069ae
103
https://github.com/rsms/inter.git
259
def build_opsz_axis_values(ttfont): nametable = ttfont['name'] instances = ttfont['fvar'].instances val_min = 0.0 val_max = 0.0 for instance in instances: opsz_val = instance.coordinates["opsz"] if val_min == 0.0 or opsz_val < val_min: val_min = opsz_val if val_max == 0.0 or opsz_val > val_max: val_max = opsz_val return [ { "name": "Regular",
9
180
build_opsz_axis_values
32
0
1
15
tests/openbb_terminal/cryptocurrency/test_cryptocurrency_helpers.py
283,405
Updating some names (#1575) * quick econ fix * black * keys and feature flags * terminal name :eyes: * some more replacements * some more replacements * edit pyproject * gst -> openbb * add example portfolios back to git * Update api from gst * sorry. skipping some tests * another round of names * another round of test edits * Missed some .gst refs and update timezone * water mark stuff * Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS * fix more GST to OpenBB Terminal * Logging : merge conflicts with main * Revert wrong files Co-authored-by: Andrew <andrew.kenreich@gmail.com> Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
OpenBBTerminal
12
Python
27
test_cryptocurrency_helpers.py
def test_coin_api_load_df_for_ta(self, mock_load): with open( "tests/openbb_terminal/cryptocurrency/json/test_cryptocurrency_helpers/btc_usd_test_data.json", encoding="utf8", ) as f: sample_return = json.load(f) mock_load.return_value = sample_return mock_return, vs = load_ta_data( coin_map_df=self.coin_map_df, source="cg", currency="usd", days=30, ) self.assertTrue(mock_return.shape == (31, 4)) self.assertTrue(vs == "usd")
b71abcfbf4d7e8ac1855522aff0378e13c8b5362
81
https://github.com/OpenBB-finance/OpenBBTerminal.git
165
def test_coin_api_load_df_for_ta(self, mock_load): with open( "tests/openbb_terminal/cryptocurrency/json/test_cryptocurrency_helpers/btc_usd_test_data.json", encoding="utf8", ) as f: sample_return = json.load(f) mock_load.return_va
19
137
test_coin_api_load_df_for_ta
39
0
1
22
tests/rest/client/test_keys.py
246,653
Add type hints to `tests/rest/client` (#12072)
synapse
15
Python
31
test_keys.py
def test_rejects_device_key_given_as_map_to_bool(self) -> None: self.register_user("alice", "wonderland") alice_token = self.login("alice", "wonderland") bob = self.register_user("bob", "uncle") channel = self.make_request( "POST", "/_matrix/client/r0/keys/query", { "device_keys": { bob: { "device_id1": True, }, }, }, alice_token, ) self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) self.assertEqual( channel.json_body["errcode"], Codes.BAD_JSON, channel.result, )
54e74cc15f30585f5874780437614c0df6f639d9
101
https://github.com/matrix-org/synapse.git
273
def test_rejects_device_key_given_as_map_to_bool(self) -> None: self.register_user("alice", "wonderland") alice_token = self.login("alice", "wonderland") bob = self.register_user("bob", "uncle") channel = self.make_request( "POST", "/_matrix/client/r0/keys/query", { "device_keys": { bob: { "device_id1": True, }, }, }, alice_token, ) self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.resu
16
168
test_rejects_device_key_given_as_map_to_bool
27
0
3
4
netbox/dcim/forms/models.py
264,382
#7844: Allow installing modules via UI without replicating components
netbox
10
Python
27
models.py
def save(self, *args, **kwargs): # If replicate_components is False, disable automatic component replication on the instance if self.instance.pk or not self.cleaned_data['replicate_components']: self.instance._disable_replication = True return super().save(*args, **kwargs)
a2981870ce6911d577dc2af3d6cd2cf5c952aa14
46
https://github.com/netbox-community/netbox.git
58
def save(self, *args, **kwargs): # If replicate_components is False, disable automatic component replication on the instance if self.instance.pk or not self.cleaned_data['replicate_components']: self.instance._disable_replication = True
9
75
save
28
1
1
11
tests/components/generic/test_config_flow.py
294,646
Generic IP Camera configflow 2 (#52360) Co-authored-by: J. Nick Koston <nick@koston.org>
core
13
Python
26
test_config_flow.py
async def test_form_stream_unauthorised(hass, fakeimg_png, user_flow): with patch( "homeassistant.components.generic.config_flow.av.open", side_effect=av.error.HTTPUnauthorizedError(0, 0), ): result2 = await hass.config_entries.flow.async_configure( user_flow["flow_id"], TESTDATA, ) assert result2["type"] == "form" assert result2["errors"] == {"stream_source": "stream_unauthorised"} @respx.mock
c1a2be72fc8b76b55cfde1823c5688100e397369
@respx.mock
67
https://github.com/home-assistant/core.git
92
async def test_form_stream_unauthorised(hass, fakeimg_png, user_flow): with patch( "homeassistant.components.generic.config_flow.av.open",
16
124
test_form_stream_unauthorised
11
0
2
7
erpnext/e_commerce/variant_selector/item_variants_cache.py
64,323
fix: Trim spaces from attributes (multi-variant creation) & explicit method for building cache - Multiple Item Variants creation fails due to extra spaces in attributes from popup. Clean them before passing to server side - Mention explicit method to build variants cache to avoid ambiguity between old method path (pre-refactor)
erpnext
9
Python
11
item_variants_cache.py
def enqueue_build_cache(item_code): if frappe.cache().hget('item_cache_build_in_progress', item_code): return frappe.enqueue( "erpnext.e_commerce.variant_selector.item_variants_cache.build_cache", item_code=item_code, queue='long' )
a64228741d065f7ac33b3208d3a704616250f925
34
https://github.com/frappe/erpnext.git
4
def enqueue_build_cache(item_code): if frappe.cache().hget('item_cache_build_in_progress', item_code): return frappe.enqueue( "erpnext.e_commerce.variant_selector.item_variants_cache.build_cache", item_code=item_cod
7
59
enqueue_build_cache
47
0
1
9
tests/css/test_tokenize.py
182,090
Variable references
textual
11
Python
32
test_tokenize.py
def test_variable_declaration_comment_ignored(): css = "$x: red; /* comment */" assert list(tokenize(css, "")) == [ Token(name='variable_declaration_start', value='$x:', path='', code=css, location=(0, 0)), Token(name='whitespace', value=' ', path='', code=css, location=(0, 3)), Token(name='token', value='red', path='', code=css, location=(0, 4)), Token(name='variable_declaration_end', value=';', path='', code=css, location=(0, 7)), Token(name='whitespace', value=' ', path='', code=css, location=(0, 8)), ]
d86ec1889e259c969a035cef413fee347bb76414
155
https://github.com/Textualize/textual.git
90
def test_variable_declaration_comment_ignored(): css = "$x: red; /* comment */" assert list(tokenize(css, "")) == [ Token(name='variable_declaration_start', value='$x:', path='', code=css, location=(0, 0)),
10
242
test_variable_declaration_comment_ignored
92
0
1
16
test/lib/ansible_test/_internal/host_profiles.py
268,811
ansible-test - Support RSA SHA-1 for SSH clients.
ansible
13
Python
73
host_profiles.py
def get_controller_target_connections(self) -> list[SshConnection]: containers = get_container_database(self.args) access = containers.data[HostType.control]['__test_hosts__'][self.container_name] host = access.host_ip port = dict(access.port_map())[22] settings = SshConnectionDetail( name=self.config.name, user='root', host=host, port=port, identity_file=SshKey(self.args).key, python_interpreter=self.python.path, # CentOS 6 uses OpenSSH 5.3, making it incompatible with the default configuration of OpenSSH 8.8 and later clients. # Since only CentOS 6 is affected, and it is only supported by ansible-core 2.12, support for RSA SHA-1 is simply hard-coded here. # A substring is used to allow custom containers to work, not just the one provided with ansible-test. enable_rsa_sha1='centos6' in self.config.image, ) return [SshConnection(self.args, settings)]
75b60b17ee1ff18ded04cf07b71e4ee32e673a0b
120
https://github.com/ansible/ansible.git
258
def get_controller_target_connections(self) -> list[SshConnection]: containers = get_container_database(self.args) access = containers.data[HostType.control]['__test_hosts__'][self.container_name] host = access.host_ip port = dict(access.port_map())[22] settings = SshConnectionDetail( name=self.config.name, user='root', host=ho
30
189
get_controller_target_connections
39
1
1
20
tests/gamestonk_terminal/stocks/options/test_payoff_controller.py
280,930
Tests : Stocks > Options (#1125) * Update tests : conftest * Updating tests : stocks/options * Updating tests : fix typing * Updating tests : black * Updating tests : pyupgrade * Updating tests : black * Updating tests : mock dates in cassettes * Updating tests : conftest * Updating tests : black * Updating tests : force single threading * Updating tests : skip * Updating tests : black * Updating tests : conftest * Update tests : skip stocks/options/controller * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : fixing issue * Updating tests : add init * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : conftest * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip
OpenBBTerminal
10
Python
30
test_payoff_controller.py
def test_menu_with_queue(expected, mocker, queue): path_controller = "gamestonk_terminal.stocks.options.payoff_controller" # MOCK CHAIN + PRICE mocker.patch( target=f"{path_controller}.get_option_chain", return_value=CHAIN, ) mocker.patch( target=f"{path_controller}.get_price", return_value=95.0, ) # MOCK SWITCH mocker.patch( target=f"{path_controller}.PayoffController.switch", return_value=["quit"], ) result_menu = payoff_controller.menu( ticker="MOCK_TICKER", expiration="2022-01-07", queue=queue, ) assert result_menu == expected @pytest.mark.vcr(record_mode="none")
000d1e93d7187299dce5653f781345031a9ad96f
@pytest.mark.vcr(record_mode="none")
81
https://github.com/OpenBB-finance/OpenBBTerminal.git
136
def test_menu_with_queue(expected, mocker, queue): path_controller = "gamestonk_terminal.stocks.options.payoff_controller" # MOCK CHAIN + PRICE mocker.patch( target=f"{path_controller}.get_option_chain", return_value=CHAIN, ) mocker.patch( target=f"{path_controller}.get_price", return_va
18
159
test_menu_with_queue
9
0
1
3
tests/sentry/integrations/msteams/test_message_builder.py
92,817
ref(msteams): Use message builder for help, installation and identity messages (#36495) Move the JSON blobs for help, installation and identity related messages to a message builder hierarchy like the one we use for Slack. Currently, all the cards that we use for the Microsoft Teams notifications are in the form of JSON blobs in sentry/integrations/msteams/card_builder.py. This is not good for maintainability and there is lot of code duplication.
sentry
10
Python
9
test_message_builder.py
def test_personal_installation_message(self): personal_installation_card = build_personal_installation_message() assert 2 == len(personal_installation_card["body"])
d435bb742d0ea89ae5e40e81ed198773262c1607
20
https://github.com/getsentry/sentry.git
22
def test_personal_installation_message(self): personal_installation_card = build_personal_i
5
35
test_personal_installation_message
30
1
3
10
test/prototype_transforms_kernel_infos.py
193,693
diversify parameter types for a couple of prototype kernels (#6635) * add more size types for prototype resize sample inputs * add skip for dispatcher * add more sizes to resize kernel info * add more skips * add more diversity to gaussian_blur parameters * diversify affine parameters and fix bounding box kernel * fix center_crop dispatcher info * revert kernel fixes * add skips for scalar shears in affine_bounding_box
vision
12
Python
21
prototype_transforms_kernel_infos.py
def sample_inputs_affine_image_mask(): for mask_loader, center in itertools.product( make_mask_loaders(sizes=["random"], dtypes=[torch.uint8]), [None, (0, 0)], ): yield ArgsKwargs(mask_loader, center=center, **_AFFINE_KWARGS[0]) for mask_loader, affine_kwargs in itertools.product( make_mask_loaders(sizes=["random"], dtypes=[torch.uint8]), _diversify_affine_kwargs_types(_AFFINE_KWARGS[0]) ): yield ArgsKwargs(mask_loader, **affine_kwargs) @pil_reference_wrapper
29b0831c1f3469b972ad8ad6521d81fc950980c4
@pil_reference_wrapper
100
https://github.com/pytorch/vision.git
75
def sample_inputs_affine_image_mask(): for mask_loader, center in itertools.product( make_mask_loaders(sizes=["random"], dtypes=[torch.uint8]), [None, (0, 0)], ): yield ArgsKwargs(mask_loader, center=center, **_AFFINE_KWARG
15
155
sample_inputs_affine_image_mask
54
0
1
25
saleor/graphql/checkout/mutations/checkout_lines_delete.py
27,620
Unify checkout mutations/resolvers to use id field. (#9862) * Unify checkout mutations/resolvers to use id field. * Update changelog * Remove uneeded " " in mutation's field description
saleor
10
Python
42
checkout_lines_delete.py
def perform_mutation(cls, _root, info, lines_ids, token=None, id=None): checkout = get_checkout( cls, info, checkout_id=None, token=token, id=id, error_class=CheckoutErrorCode, ) _, lines_to_delete = resolve_global_ids_to_primary_keys( lines_ids, graphene_type="CheckoutLine", raise_error=True ) cls.validate_lines(checkout, lines_to_delete) checkout.lines.filter(id__in=lines_to_delete).delete() lines, _ = fetch_checkout_lines(checkout) manager = info.context.plugins checkout_info = fetch_checkout_info( checkout, lines, info.context.discounts, manager ) update_checkout_shipping_method_if_invalid(checkout_info, lines) recalculate_checkout_discount( manager, checkout_info, lines, info.context.discounts ) manager.checkout_updated(checkout) return CheckoutLinesDelete(checkout=checkout)
3673e7e11f22e5a695c708b7a594c11857a93898
146
https://github.com/saleor/saleor.git
257
def perform_mutation(cls, _root, info, lines_ids, token=None, id=None): checkout = get_checkout( cls, info, checkout_id=None, token=token, id=id, error_class=CheckoutErrorCode, ) _, lines_to_delete = resolve_global_ids_to_primary_keys( lines_ids, graphene_type="CheckoutLine", raise_error=True ) cls.validate_lines(checkout, lines_to_delete) checkout.lines.filter(id__in=lines_to_delete).delete() lines, _ = fetch_checkout_lines(checkout) manager = info.context.plugins checkout_info = fetch_checkout_info( checkout, lines, info.context.discounts, manager ) upd
33
213
perform_mutation
20
0
1
8
src/transformers/models/opt/modeling_tf_opt.py
30,778
Opt in flax and tf (#17388) * initial commit * add init file * update globakl init * update index and dummy objects * style * update modelling auto * fix initi typo in src/transformers * fix typo in modeling tf auto, opt was in wrong mapping name * fixed a slow test : saved_model * style * fix positionnal embedding if no position id is provided * update tf test * update test flax requirements * fixed serialization * update * update tf name to allow smooth convertion * update flax tests * style * fix test typo * fix tf typo test * add xla for generate support in causal LM * fixed bug * cleaned tf tests * style * removed from PT for slow tests * fix typp * opt test as slow * trying to fix GPT2 undefined * correct documentation and add to test doc * update tf doc * fix doc * fake commit * Apply suggestions from code review Co-authored-by: Joao Gante <joaofranciscocardosogante@gmail.com> * update test based on review * merged main layer for functionning test * fixup + quality * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * update long comment * make fix copies Co-authored-by: Arthur <arthur@huggingface.co> Co-authored-by: Joao Gante <joaofranciscocardosogante@gmail.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
transformers
11
Python
17
modeling_tf_opt.py
def dummy_inputs(self): pad_token = 1 input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32) dummy_inputs = { "attention_mask": tf.math.not_equal(input_ids, pad_token), "input_ids": input_ids, } return dummy_inputs
7822a9b7a7b93b5dbf04eee7db3d2423ced1f9b6
48
https://github.com/huggingface/transformers.git
76
def dummy_inputs(self): pad_token = 1 input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32) dummy_inputs = { "attention_mask": tf.mat
11
77
dummy_inputs
36
0
1
18
saleor/graphql/discount/mutations/sale_add_catalogues.py
29,783
Add plugin manager promise (#11414)
saleor
12
Python
24
sale_add_catalogues.py
def perform_mutation(cls, _root, info, **data): sale = cls.get_node_or_error( info, data.get("id"), only_type=Sale, field="sale_id" ) previous_catalogue = fetch_catalogue_info(sale) manager = get_plugin_manager_promise(info.context).get() with traced_atomic_transaction(): cls.add_catalogues_to_node(sale, data.get("input")) current_catalogue = fetch_catalogue_info(sale) previous_cat_converted = convert_catalogue_info_to_global_ids( previous_catalogue ) current_cat_converted = convert_catalogue_info_to_global_ids( current_catalogue )
decd505f55d02c616ce5b804c06a71e120d15c15
113
https://github.com/saleor/saleor.git
177
def perform_mutation(cls, _root, info, **data): sale = cls.get_node_or_error( info, data.get("id"), only_type=Sale, field="sale_id" ) previous_catalogue = fetch_catalogue_info(sale) manager = get_plugin_manager_promise(info.context).get() with traced_atomic_transaction(): cls.add_catalogues_to_node(sale, data.get("input")) current_catalogue = fetch_catalogue_info(sale) previous_cat_converted = convert_catalogue_info_to_global_ids( previous_catalogue ) current_cat_converted = convert_catalogue_info_to_global_ids(
22
147
perform_mutation
31
0
1
13
saleor/graphql/checkout/tests/test_checkout.py
26,886
Transaction mutations for new checkout flow (#9564) * Add new mutation for creating order from checkout * Add implementaion of mutation CheckoutFromOrderCreate * Add preview label * Add mutations for manage payments * Clean up with new migration files * Add clean up after adding PoC changes * Clean up around payment mutations * Add test for new fields in payment type * Add tests for payment mutations * Add changes after self review * Add preview label to description of a new payment mutations * Remove field for depreceated error field * Add missing error code to Saleor error codes * Move validation to mutation part * Fix typo in field describtion * Apply changes after review * Clean in doc string in main method of the PR. Remove fixme that will be covered by separate PR * Add missing space in description of the field. * Apply changes after review * Fix incorrect field name for input id field * Rename orderFromCheckoutCreate to orderCreateFromCheckout * Add label ADDED_IN_32 to mutation description * Use HANDLE_CHECKOUTS permission, instead of MANAGE_CHECKOUTS * Update changelog * Fix tests * Add main logic for new handler in manager and webhook plugin * Add payment action request hooks to payment mutations * Add migration with new possible events for order * Add payment action request handlers to order mutations * Apply changes after review * Fix tests * Fix tests * Add tests for new payment flow in order mutation for payment actions * Add support for refund webhook action to fulfillment mutation related to return&refund * Apply changes after review * Add TransactionItem model for new checkout approach * Apply changes after self-review * Use createdAt and modifiedAt for new type and model * Apply changes after review * Add mutation to call transaction action * Add TransactionEvent to track changes made on TransactionItem * Clean up after self-review * Add missing space in private metadata description * Fix inccorect permission name in comment. Add missing added_in_x label * Add missing added_in_3x label * Add permissions for metadata * Apply changes after review * Apply changes after review * Make cleanup with mutation/query fields labels * Clean up after self-review * Update changelog * Attach transactions to checkout * Attach transactions created for checkout * Use [] instead of .get for dict in dataloader * Add subscription for transaction action request * Fix failing is_event_active * Clean up changelog after merging main * Clean up changelog after merging main * Add missing transaction event * Limit transaction mutations to be used only by apps * Use event.reference instead of event.payment_id for transactions * Fix failing migration * Apply changes after review * Update scheme
saleor
11
Python
27
test_checkout.py
def test_checkout_transactions_missing_permission(api_client, checkout): # given checkout.payment_transactions.create( status="Authorized", type="Credit card", reference="123", currency="USD", authorized_value=Decimal("15"), available_actions=[TransactionAction.CAPTURE, TransactionAction.VOID], ) query = QUERY_CHECKOUT_TRANSACTIONS variables = {"token": str(checkout.token)} # when response = api_client.post_graphql(query, variables) # then assert_no_permission(response)
df31433d96cde352e4d62181e39bb8efcf7c9f2a
78
https://github.com/saleor/saleor.git
99
def test_checkout_transactions_missing_permission(api_client, checkout): # given checkout.payment_transactions.create( status="Authorized", type="Credit card", reference="123", currency="USD", authorized_value=Decimal("15"), available_actions=[TransactionAction.CAPTURE, TransactionAction.VOID], ) query = QUERY_CHECKOUT_TRANSACTIONS variables = {"token": str(checkout.token)} # when response = api_client.
23
130
test_checkout_transactions_missing_permission
19
0
1
13
seaborn/_core/plot.py
41,798
Improve how inline pngs get scaled when using a tight bbox
seaborn
9
Python
18
plot.py
def save(self, fname, **kwargs) -> Plot: # TODO expose important keyword arguments in our signature? self.plot().save(fname, **kwargs) return self
e2c449e18bf47a6907b0d8e88b5673f2a9b45790
28
https://github.com/mwaskom/seaborn.git
47
def save(self, fname, **kwargs) -> Plot: # TODO
6
47
save
1,312
0
151
513
mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
116,197
keep datetype from predictor
mindsdb
24
Python
496
sql_query.py
def execute_step(self, step, steps_data): if type(step) == GetPredictorColumns: predictor_name = step.predictor.parts[-1] dn = self.datahub.get(self.mindsdb_database_name) columns = dn.get_table_columns(predictor_name) columns = [ (column_name, column_name) for column_name in columns ] data = { 'values': [], 'columns': { (self.mindsdb_database_name, predictor_name, predictor_name): columns }, 'tables': [(self.mindsdb_database_name, predictor_name, predictor_name)] } elif type(step) == GetTableColumns: table = step.table dn = self.datahub.get(step.namespace) ds_query = Select(from_table=Identifier(table), targets=[Star()]) data, columns_info = dn.query(ds_query) table_alias = (self.database, table, table) data = { 'values': [], 'columns': { table_alias: columns_info }, 'tables': [table_alias] } elif type(step) == FetchDataframeStep: data = self._fetch_dataframe_step(step) elif type(step) == UnionStep: raise ErNotSupportedYet('Union step is not implemented') # TODO add union support # left_data = steps_data[step.left.step_num] # right_data = steps_data[step.right.step_num] # data = left_data + right_data elif type(step) == MapReduceStep: try: if step.reduce != 'union': raise ErLogicError(f'Unknown MapReduceStep type: {step.reduce}') step_data = steps_data[step.values.step_num] vars = [] step_data_values = step_data['values'] for row in step_data_values: var_group = {} vars.append(var_group) for row_data in row.values(): for name, value in row_data.items(): if name[0] != '__mindsdb_row_id': var_group[name[1] or name[0]] = value data = { 'values': [], 'columns': {}, 'tables': [] } substep = step.step if type(substep) == FetchDataframeStep: query = substep.query for var_group in vars: markQueryVar(query.where) for name, value in var_group.items(): replaceQueryVar(query.where, value, name) sub_data = self._fetch_dataframe_step(substep) if len(data['columns']) == 0: data['columns'] = sub_data['columns'] if len(data['tables']) == 0: data['tables'] = sub_data['tables'] data['values'].extend(sub_data['values']) unmarkQueryVar(query.where) elif type(substep) == MultipleSteps: data = self._multiple_steps_reduce(substep, vars) else: raise ErLogicError(f'Unknown step type: {step.step}') except Exception as e: raise SqlApiUnknownError(f'error in map reduce step: {e}') from e elif type(step) == MultipleSteps: if step.reduce != 'union': raise ErNotSupportedYet(f"Only MultipleSteps with type = 'union' is supported. 
Got '{step.type}'") data = None for substep in step.steps: subdata = self.execute_step(substep, steps_data) if data is None: data = subdata else: data['values'].extend(subdata['values']) elif type(step) == ApplyPredictorRowStep: try: predictor = '.'.join(step.predictor.parts) dn = self.datahub.get(self.mindsdb_database_name) where_data = step.row_dict data = dn.query( table=predictor, where_data=where_data ) data = [{(key, key): value for key, value in row.items()} for row in data] table_name = get_preditor_alias(step, self.database) values = [{table_name: x} for x in data] columns = {table_name: []} if len(data) > 0: row = data[0] columns[table_name] = list(row.keys()) # TODO else data = { 'values': values, 'columns': columns, 'tables': [table_name] } except Exception as e: if isinstance(e, SqlApiException): raise e else: raise SqlApiUnknownError(f'error in apply predictor row step: {e}') from e elif type(step) in (ApplyPredictorStep, ApplyTimeseriesPredictorStep): try: # set row_id data = steps_data[step.dataframe.step_num] row_id_col = ('__mindsdb_row_id', '__mindsdb_row_id') for table in data['columns']: data['columns'][table].append(row_id_col) row_count = len(data['values']) for i, row in enumerate(data['values']): for n, table_name in enumerate(row): row[table_name][row_id_col] = self.row_id + i + n * row_count # shift counter self.row_id += self.row_id + row_count * len(data['tables']) dn = self.datahub.get(self.mindsdb_database_name) predictor = '.'.join(step.predictor.parts) where_data = [] for row in steps_data[step.dataframe.step_num]['values']: new_row = {} for table_name in row: keys_intersection = set(new_row) & set(row[table_name]) if len(keys_intersection) > 0: raise ErLogicError( f'The predictor got two identical keys from different datasources: {keys_intersection}' ) new_row.update(row[table_name]) where_data.append(new_row) where_data = [{key[1]: value for key, value in row.items()} for row in where_data] is_timeseries = self.planner.predictor_metadata[predictor]['timeseries'] _mdb_forecast_offset = None if is_timeseries: if '> LATEST' in self.query_str: # stream mode -- if > LATEST, forecast starts on inferred next timestamp _mdb_forecast_offset = 1 elif '= LATEST' in self.query_str: # override: when = LATEST, forecast starts on last provided timestamp instead of inferred next time _mdb_forecast_offset = 0 else: # normal mode -- emit a forecast ($HORIZON data points on each) for each provided timestamp _mdb_forecast_offset = None for row in where_data: if '__mdb_forecast_offset' not in row: row['__mdb_forecast_offset'] = _mdb_forecast_offset # for row in where_data: # for key in row: # if isinstance(row[key], datetime.date): # row[key] = str(row[key]) table_name = get_preditor_alias(step, self.database) columns = {table_name: []} if len(where_data) == 0: # no data, don't run predictor cols = dn.get_table_columns(predictor) + ['__mindsdb_row_id'] columns[table_name] = [(c, c) for c in cols] values = [] else: data = dn.query( table=predictor, where_data=where_data ) data = [{(key, key): value for key, value in row.items()} for row in data] values = [{table_name: x} for x in data] if len(data) > 0: row = data[0] columns[table_name] = list(row.keys()) # TODO else data = { 'values': values, 'columns': columns, 'tables': [table_name], 'types': {table_name: self.model_types} } except Exception as e: raise SqlApiUnknownError(f'error in apply predictor step: {e}') from e elif type(step) == JoinStep: try: left_data = steps_data[step.left.step_num] right_data = 
steps_data[step.right.step_num] # FIXME https://github.com/mindsdb/mindsdb_sql/issues/136 # is_timeseries = False # if True in [type(step) == ApplyTimeseriesPredictorStep for step in plan.steps]: # right_data = steps_data[step.left.step_num] # left_data = steps_data[step.right.step_num] # is_timeseries = True if step.query.condition is not None: raise ErNotSupportedYet('At this moment supported only JOIN without condition') if step.query.join_type.upper() not in ('LEFT JOIN', 'JOIN'): raise ErNotSupportedYet('At this moment supported only JOIN and LEFT JOIN') if len(left_data['tables']) == 0 or len(right_data['tables']) == 0: raise ErLogicError('Table for join is not found') if ( len(left_data['tables']) != 1 or len(right_data['tables']) != 1 or left_data['tables'][0] == right_data['tables'][0] ): raise ErNotSupportedYet('At this moment supported only JOIN of two different tables') data = { 'values': [], 'columns': {}, 'tables': list(set(left_data['tables'] + right_data['tables'])), 'types': {} } for data_part in [left_data, right_data]: for table_name in data_part['columns']: if table_name not in data['columns']: data['columns'][table_name] = data_part['columns'][table_name] # keep types data['types'][table_name] = data_part.get('types', {}).get(table_name, {}).copy() else: data['columns'][table_name].extend(data_part['columns'][table_name]) # keep types data['types'][table_name].update(data_part.get('types', {}).get(table_name, {})) for table_name in data['columns']: data['columns'][table_name] = list(set(data['columns'][table_name])) left_key = left_data['tables'][0] right_key = right_data['tables'][0] left_columns_map = OrderedDict() left_columns_map_reverse = OrderedDict() for i, column_name in enumerate(left_data['columns'][left_key]): left_columns_map[f'a{i}'] = column_name left_columns_map_reverse[column_name] = f'a{i}' right_columns_map = {} right_columns_map_reverse = {} for i, column_name in enumerate(right_data['columns'][right_key]): right_columns_map[f'b{i}'] = column_name right_columns_map_reverse[column_name] = f'b{i}' left_df_data = [] for row in left_data['values']: row = row[left_key] left_df_data.append({left_columns_map_reverse[key]: value for key, value in row.items()}) right_df_data = [] for row in right_data['values']: row = row[right_key] right_df_data.append({right_columns_map_reverse[key]: value for key, value in row.items()}) df_a = pd.DataFrame(left_df_data, columns=left_columns_map.keys()) df_b = pd.DataFrame(right_df_data, columns=right_columns_map.keys()) a_name = f'a{round(time.time() * 1000)}' b_name = f'b{round(time.time() * 1000)}' con = duckdb.connect(database=':memory:') con.register(a_name, df_a) con.register(b_name, df_b) resp_df = con.execute(f).fetchdf() con.unregister(a_name) con.unregister(b_name) con.close() resp_df = resp_df.replace({np.nan: None}) resp_dict = resp_df.to_dict(orient='records') for row in resp_dict: new_row = {left_key: {}, right_key: {}} for key, value in row.items(): if key.startswith('a'): new_row[left_key][left_columns_map[key]] = value else: new_row[right_key][right_columns_map[key]] = value data['values'].append(new_row) # remove all records with empty data from predictor from join result # otherwise there are emtpy records in the final result: # +------------+------------+-------+-----------+----------+ # | time | time | state | pnew_case | new_case | # +------------+------------+-------+-----------+----------+ # | 2020-10-21 | 2020-10-24 | CA | 0.0 | 5945.0 | # | 2020-10-22 | 2020-10-23 | CA | 0.0 | 6141.0 | # | 
2020-10-23 | 2020-10-22 | CA | 0.0 | 2940.0 | # | 2020-10-24 | 2020-10-21 | CA | 0.0 | 3707.0 | # | NULL | 2020-10-20 | NULL | nan | nan | # | NULL | 2020-10-19 | NULL | nan | nan | # | NULL | 2020-10-18 | NULL | nan | nan | # | NULL | 2020-10-17 | NULL | nan | nan | # | NULL | 2020-10-16 | NULL | nan | nan | # +------------+------------+-------+-----------+----------+ # 9 rows in set (2.07 sec) # if is_timeseries: # data_values = [] # for row in data['values']: # for key in row: # if 'mindsdb' in key: # if not is_empty_prediction_row(row[key]): # data_values.append(row) # break # data['values'] = data_values except Exception as e: raise SqlApiUnknownError(f'error in join step: {e}') from e elif type(step) == FilterStep: step_data = steps_data[step.dataframe.step_num] # dicts to look up column and table column_idx = {} tables_idx = {} col_table_idx = {} # prepare columns for dataframe. column name contains table name cols = set() for table, col_list in step_data['columns'].items(): _, t_name, t_alias = table tables_idx[t_name] = t_name tables_idx[t_alias] = t_name for column in col_list: # table_column c_name, c_alias = column col_name = f'{t_name}^{c_name}' cols.add(col_name) col_table_idx[col_name] = (table, column) column_idx[c_name] = t_name # prepare dict for dataframe result = [] for row in step_data['values']: data_row = {} for table, col_list in step_data['columns'].items(): for col in col_list: col_name = f'{table[1]}^{col[0]}' data_row[col_name] = row[table][col] result.append(data_row) df = pd.DataFrame(result, columns=list(cols)) # analyze condition and change name of columns
5b1cd41a6202873e49c9ec43c770cf7d1f700adb
3,772
https://github.com/mindsdb/mindsdb.git
6,885
def execute_step(self, step, steps_data): if type(step) == GetPredictorColumns: predictor_name = step.predictor.parts[-1] dn = self.datahub.get(self.mindsdb_database_name) columns = dn.get_table_columns(predictor_name) columns = [ (column_name, column_name) for column_name in columns ] data = { 'values': [], 'columns': { (self.mindsdb_database_name, predictor_name, predictor_name): columns }, 'tables': [(self.mindsdb_database_name, predictor_name, predictor_name)] } elif type(step) == GetTableColumns: table = step.table dn = self.datahub.get(step.namespace) ds_query = Select(from_table=Identifier(table), targets=[Star()]) data, columns_info = dn.query(ds_query) table_alias = (self.database, table, table) data = { 'values': [], 'columns': { table_alias: columns_info }, 'tables': [table_alias] } elif type(step) == FetchDataframeStep: data = self._fetch_dataframe_step(step) elif type(step) == UnionStep: raise ErNotSupportedYet('Union step is not implemented') # TODO add union support # left_data = steps_data[step.left.step_num] # right_data = steps_data[step.right.step_num] # data = left_data + right_data elif type(step) == MapReduceStep: try: if step.reduce != 'union': raise ErLogicError(f'Unknown MapReduceStep type: {step.reduce}') step_data = steps_data[step.values.step_num] vars = [] step_data_values = step_data['values'] for row in step_data_values: var_group = {} vars.append(var_group) for row_data in row.values(): for name, value in row_data.items(): if name[0] != '__mindsdb_row_id': var_group[name[1] or name[0]] = value data = { 'values': [], 'columns': {}, 'tables': [] } substep = step.step if type(substep) == FetchDataframeStep: query = substep.query for var_group in vars: markQueryVar(query.where) for name, value in var_group.items(): replaceQueryVar(query.where, value, name) sub_data = self._fetch_dataframe_step(substep) if len(data['columns']) == 0: data['columns'] = sub_data['columns'] if len(data['tables']) == 0: data['tables'] = sub_data['tables'] data['values'].extend(sub_data['values']) unmarkQueryVar(query.where) elif type(substep) == MultipleSteps: data = self._multiple_steps_reduce(substep, vars) else: raise ErLogicError(f'Unknown step type: {step.step}') except Exception as e: raise SqlApiUnknownError(f'error in map reduce step: {e}') from e elif type(step) == MultipleSteps: if step.reduce != 'union': raise ErNotSupportedYet(f"Only MultipleSteps with type = 'union' is supported. 
Got '{step.type}'") data = None for substep in step.steps: subdata = self.execute_step(substep, steps_data) if data is None: data = subdata else: data['values'].extend(subdata['values']) elif type(step) == ApplyPredictorRowStep: try: predictor = '.'.join(step.predictor.parts) dn = self.datahub.get(self.mindsdb_database_name) where_data = step.row_dict data = dn.query( table=predictor, where_data=where_data ) data = [{(key, key): value for key, value in row.items()} for row in data] table_name = get_preditor_alias(step, self.database) values = [{table_name: x} for x in data] columns = {table_name: []} if len(data) > 0: row = data[0] columns[table_name] = list(row.keys()) # TODO else data = { 'values': values, 'columns': columns, 'tables': [table_name] } except Exception as e: if isinstance(e, SqlApiException): raise e else: raise SqlApiUnknownError(f'error in apply predictor row step: {e}') from e elif type(step) in (ApplyPredictorStep, ApplyTimeseriesPredictorStep): try: # set row_id data = steps_data[step.dataframe.step_num] row_id_col = ('__mindsdb_row_id', '__mindsdb_row_id') for table in data['columns']: data['columns'][table].append(row_id_col) row_count = len(data['values']) for i, row in enumerate(data['values']): for n, table_name in enumerate(row): row[table_name][row_id_col] = self.row_id + i + n * row_count # shift counter self.row_id += self.row_id + row_count * len(data['tables']) dn = self.datahub.get(self.mindsdb_database_name) predictor = '.'.join(step.predictor.parts) where_data = [] for row in steps_data[step.dataframe.step_num]['values']: new_row = {} for table_name in row: keys_intersection = set(new_row) & set(row[table_name]) if len(keys_intersection) > 0: raise ErLogicError( f'The predictor got two identical keys from different datasources: {keys_intersection}' ) new_row.update(row[table_name]) where_data.append(new_row) where_data = [{key[1]: value for key, value in row.items()} for row in where_data] is_timeseries = self.planner.predictor_metadata[predictor]['timeseries'] _mdb_forecast_offset = None if is_timeseries: if '> LATEST' in self.query_str: # stream mode -- if > LATEST, forecast starts on inferred next timestamp _mdb_forecast_offset = 1 elif '= LATEST' in self.query_str: # override: when = LATEST, forecast starts on last provided timestamp instead of inferred next time _mdb_forecast_offset = 0 else: # normal mode -- emit a forecast ($HORIZON data points on each) for each provided timestamp _mdb_forecast_offset = None for row in where_data: if '__mdb_forecast_offset' not in row: row['__mdb_forecast_offset'] = _mdb_forecast_offset # for row in where_data: # for key in row: # if isinstance(row[key], datetime.date): # row[key] = str(row[key]) table_name = get_preditor_alias(step, self.database) columns = {table_name: []} if len(where_data) == 0: # no data, don't run predictor cols = dn.get_table_columns(predictor) + ['__mindsdb_row_id'] columns[table_name] = [(c, c) for c in cols] values = [] else: data = dn.query( table=predictor, where_data=where_data ) data = [{(key, key): value for key, value in row.items()} for row in data] values = [{table_name: x} for x in data] if len(data) > 0: row = data[0] columns[table_name] = list(row.keys()) # TODO else data = { 'values': values, 'columns': columns, 'tables': [table_name], 'types': {table_name: self.model_types} } except Exception as e: raise SqlApiUnknownError(f'error in apply predictor step: {e}') from e elif type(step) == JoinStep: try: left_data = steps_data[step.left.step_num] right_data = 
steps_data[step.right.step_num] # FIXME https://github.com/mindsdb/mindsdb_sql/issues/136 # is_timeseries = False # if True in [type(step) == ApplyTimeseriesPredictorStep for step in plan.steps]: # right_data = steps_data[step.left.step_num] # left_data = steps_data[step.right.step_num] # is_timeseries = True if step.query.condition is not None: raise ErNotSupportedYet('At this moment supported only JOIN without condition') if step.query.join_type.upper() not in ('LEFT JOIN', 'JOIN'): raise ErNotSupportedYet('At this moment supported only JOIN and LEFT JOIN') if len(left_data['tables']) == 0 or len(right_data['tables']) == 0: raise ErLogicError('Table for join is not found') if ( len(left_data['tables']) != 1 or len(right_data['tables']) != 1 or left_data['tables'][0] == right_data['tables'][0] ): raise ErNotSupportedYet('At this moment supported only JOIN of two different tables') data = { 'values': [], 'columns': {}, 'tables': list(set(left_data['tables'] + right_data['tables'])), 'types': {} } for data_part in [left_data, right_data]: for table_name in data_part['columns']: if table_name not in data['columns']: data['columns'][table_name] = data_part['columns'][table_name] # keep types data['types'][table_name] = data_part.get('types', {}).get(table_name, {}).copy() else: data['columns'][table_name].extend(data_part['columns'][table_name]) # keep types data['types'][table_name].update(data_part.get('types', {}).get(table_name, {})) for table_name in data['columns']: data['columns'][table_name] = list(set(data['columns'][table_name])) left_key = left_data['tables'][0] right_key = right_data['tables'][0] left_columns_map = OrderedDict() left_columns_map_reverse = OrderedDict() for i, column_name in enumerate(left_data['columns'][left_key]): left_columns_map[f'a{i}'] = column_name left_columns_map_reverse[column_name] = f'a{i}' right_columns_map = {} right_columns_map_reverse = {} for i, column_name in enumerate(right_data['columns'][right_key]): right_columns_map[f'b{i}'] = column_name right_columns_map_reverse[column_name] = f'b{i}' left_df_data = [] for row in left_data['values']: row = row[left_key] left_df_data.append({left_columns_map_reverse[key]: value for key, value in row.items()}) right_df_data = [] for row in right_data['values']: row = row[right_key] right_df_data.append({right_columns_map_reverse[key]: value for key, value in row.items()}) df_a = pd.DataFrame(left_df_data, columns=left_columns_map.keys()) df_b = pd.DataFrame(right_df_data, columns=right_columns_map.keys()) a_name = f'a{round(time.time() * 1000)}' b_name = f'b{round(time.time() * 1000)}' con = duckdb.connect(database=':memory:') con.register(a_name, df_a) con.register(b_name, df_b) resp_df = con.execute(f).fetchdf() con.unregister(a_name) con.unregister(b_name) con.close() resp_df = resp_df.replace({np.nan: None}) resp_dict = resp_df.to_dict(orient='records') for row in resp_dict: new_row = {left_key: {}, right_key: {}} for key, value in row.items(): if key.startswith('a'): new_row[left_key][left_columns_map[key]] = value else: new_row[right_key][right_columns_map[key]] = value data['values'].append(new_row) # remo
157
3,598
execute_step
13
0
1
3
keras/utils/control_flow_util.py
276,710
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
8
Python
13
control_flow_util.py
def InXlaContext(graph): ctxt = graph._get_control_flow_context() # pylint: disable=protected-access return GetContainingXLAContext(ctxt) is not None
84afc5193d38057e2e2badf9c889ea87d80d8fbf
20
https://github.com/keras-team/keras.git
19
def InXlaContext(graph): ctxt = graph._get_control_flow_context() # pylint: disable=protected-access return GetContainingXLAContext(ctxt) is not None
5
34
InXlaContext
23
0
3
8
pipenv/patched/notpip/_vendor/distlib/_backport/shutil.py
21,357
Vendor in pip 22.1.2
pipenv
10
Python
16
shutil.py
def _destinsrc(src, dst): src = abspath(src) dst = abspath(dst) if not src.endswith(os.path.sep): src += os.path.sep if not dst.endswith(os.path.sep): dst += os.path.sep return dst.startswith(src)
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
66
https://github.com/pypa/pipenv.git
51
def _destinsrc(src, dst): src = abspath(src) dst = abspath(dst) if not src.endswith(os.path.sep): src += os.path.sep if not dst.endswith(os.path.sep): dst += os.path.sep return dst.sta
9
106
_destinsrc
163
0
6
51
jaxlib/lapack.py
122,281
Add input-output aliasing annotations for LAPACK calls on CPU. PiperOrigin-RevId: 480156067
jax
16
Python
105
lapack.py
def orgqr_mhlo(dtype, a, tau): a_type = ir.RankedTensorType(a.type) dims = a_type.shape assert len(dims) >= 2 m, n = dims[-2:] batch_dims = tuple(dims[:-2]) num_bd = len(batch_dims) b = 1 for d in batch_dims: b *= d tau_dims = ir.RankedTensorType(tau.type).shape assert tau_dims[:-1] == dims[:-2], (tau.type, a.type) k = tau_dims[-1] if dtype == np.float32: fn = b"lapack_sorgqr" lwork = _lapack.lapack_sorgqr_workspace(m, n, k) elif dtype == np.float64: fn = b"lapack_dorgqr" lwork = _lapack.lapack_dorgqr_workspace(m, n, k) elif dtype == np.complex64: fn = b"lapack_cungqr" lwork = _lapack.lapack_cungqr_workspace(m, n, k) elif dtype == np.complex128: fn = b"lapack_zungqr" lwork = _lapack.lapack_zungqr_workspace(m, n, k) else: raise NotImplementedError(f"Unsupported dtype {dtype}") scalar_layout = [] layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1)) i32_type = ir.IntegerType.get_signless(32) out = custom_call( fn, [ a.type, ir.RankedTensorType.get(batch_dims, i32_type), ir.RankedTensorType.get([lwork], a_type.element_type), ], [_mhlo_s32(int(b)), _mhlo_s32(m), _mhlo_s32(n), _mhlo_s32(k), _mhlo_s32(lwork), a, tau], operand_layouts=[scalar_layout] * 5 + [ layout, tuple(range(num_bd, -1, -1)), ], result_layouts=[ layout, tuple(range(num_bd - 1, -1, -1)), [0], ], operand_output_aliases={5: 0}, ) return out[:2] # ?potrf: Cholesky decomposition
2246887f7b39f291c647332251b1a105e9784341
394
https://github.com/google/jax.git
320
def orgqr_mhlo(dtype, a, tau): a_type = ir.RankedTensorType(a.type) dims = a_type.shape assert len(dims) >= 2 m, n = dims[-2:] batch_dims = tuple(dims[:-2]) num_bd = len(batch_dims) b = 1 for d in batch_dims: b *= d tau_dims = ir.RankedTensorType(tau.type).shape assert tau_dims[:-1] == dims[:-2], (tau.type, a.type) k = tau_dims[-1] if dtype == np.flo
48
591
orgqr_mhlo
24
0
1
6
sympy/utilities/tests/test_iterables.py
197,072
compat: add back deprecated stub files in iterables The ordered and default_sort_key functions where removed from sympy.utilities.iterables in GH-22357 but no deprecated stub functions were left behind. This commit adds and tests the stubs to ensure that anyone depending on importing these functions like from sympy.utilities.iterables import default_sort_key from sympy.utilities.iterables import ordered will see a deprecation warning rather than an error. The proper way to import these functions both before and after these changes is: from sympy import default_sort_key from sympy import ordered
sympy
13
Python
18
test_iterables.py
def test_deprecated_iterables():
    from sympy.utilities.iterables import default_sort_key, ordered

    with warns_deprecated_sympy():
        assert list(ordered([y, x])) == [x, y]
    with warns_deprecated_sympy():
        assert sorted([y, x], key=default_sort_key) == [x, y]
a0daf4e99d77c586fcc62143c84846a0a98bc363
61
https://github.com/sympy/sympy.git
46
def test_deprecated_iterables(): from sympy
12
96
test_deprecated_iterables
33
0
2
14
homeassistant/components/homekit_controller/media_player.py
311,861
Add missing type hints to homekit_controller (#65368)
core
12
Python
28
media_player.py
def source(self) -> str | None:
    active_identifier = self.service.value(CharacteristicsTypes.ACTIVE_IDENTIFIER)
    if not active_identifier:
        return None

    this_accessory = self._accessory.entity_map.aid(self._aid)
    this_tv = this_accessory.services.iid(self._iid)

    input_source = this_accessory.services.first(
        service_type=ServicesTypes.INPUT_SOURCE,
        characteristics={CharacteristicsTypes.IDENTIFIER: active_identifier},
        parent_service=this_tv,
    )
    char = input_source[CharacteristicsTypes.CONFIGURED_NAME]
    return char.value
9f5d77e0df957c20a2af574d706140786f0a551a
95
https://github.com/home-assistant/core.git
140
def source(self) -> str | None: active_identifier = self.service.value(CharacteristicsTypes.ACTIVE_IDENTIFIER) if not active_identifier: return None this_accessory = self._accessory.entity_map.aid(self._aid) this_tv = this_accessory.services.iid(self._iid) input_so
27
149
source
22
0
2
8
examples/model_interpretation/evaluation/accuracy/mrc_f1_evaluate.py
322,794
Add NLP model interpretation (#1752) * upload NLP interpretation * fix problems and relocate project * remove abandoned picture * remove abandoned picture * fix dead link in README * fix dead link in README * fix code style problems * fix CR round 1 * remove .gitkeep files * fix code style * fix file encoding problem * fix code style * delete duplicated files due to directory rebuild * fix CR round 2 * fix code style * fix ernie tokenizer * fix code style * fix problem from CR round 1 * fix bugs * fix README * remove duplicated files * deal with diff of old and new tokenizer results * fix CR round 4 * fix code style * add missing dependence * fix broken import path * move some data file to cloud * MRC upper case to lower case Co-authored-by: Zeyu Chen <chenzeyu01@baidu.com> Co-authored-by: binlinquge <xxx> Co-authored-by: Guo Sheng <guosheng@baidu.com>
PaddleNLP
11
Python
17
mrc_f1_evaluate.py
def read_model_prediction(file_path):
    f = open(file_path, 'r')
    predict = {}
    for l in f.readlines():
        ins = json.loads(l)
        predict[ins['id']] = ins
    f.close()
    return predict
93cae49c0c572b5c1ac972759140fbe924b0374d
50
https://github.com/PaddlePaddle/PaddleNLP.git
50
def read_model_prediction(file_path): f = open(file_path, 'r') predict = {} for l in f.readlines(): ins = json.loads
11
84
read_model_prediction
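Sketch (not part of the record): the same JSON-lines loading pattern as read_model_prediction above, written with a context manager so the file handle is closed even on error. The 'id' key is carried over from the record; the function name is invented.

import json

def load_predictions(file_path):
    predictions = {}
    with open(file_path, "r") as f:
        for line in f:
            ins = json.loads(line)          # one JSON object per line
            predictions[ins["id"]] = ins    # index records by their id field
    return predictions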
33
0
1
10
rllib/utils/exploration/tests/test_explorations.py
141,102
[RLlib] Move all remaining algos into `algorithms` directory. (#25366)
ray
10
Python
31
test_explorations.py
def test_ddpg(self):
    # Switch off random timesteps at beginning. We want to test actual
    # GaussianNoise right away.
    config = ddpg.DEFAULT_CONFIG.copy()
    config["exploration_config"]["random_timesteps"] = 0
    do_test_explorations(
        ddpg.DDPG,
        "Pendulum-v1",
        config,
        np.array([0.0, 0.1, 0.0]),
        expected_mean_action=0.0,
    )
b5bc2b93c33f0f475af69dd6eca656dcf264612d
59
https://github.com/ray-project/ray.git
129
def test_ddpg(self): # Switch off random timesteps at beginning. We want to test actual # GaussianNoise right away. config = ddpg.DEFAULT_CONF
11
82
test_ddpg
17
0
1
8
tests/helpers/test_entity_registry.py
292,283
Validate in split_entity_id (#66835)
core
10
Python
17
test_entity_registry.py
async def test_invalid_entity_category_str(hass, registry, caplog):
    entry = er.RegistryEntry(
        entity_id="light.kitchen",
        unique_id="5678",
        platform="hue",
        entity_category="invalid",
    )

    assert entry.entity_category is None
1bbc1f5f55de29bef86edbf7e504298c3d51bdc8
39
https://github.com/home-assistant/core.git
57
async def test_invalid_entity_category_str(hass, registry, caplog): entry = er.RegistryEntry( entity_id="light.kitchen",
11
67
test_invalid_entity_category_str
83
0
1
25
tests/test_validation.py
30,569
Refactor reporting of optimization failures
OCRmyPDF
10
Python
48
test_validation.py
def test_report_file_size(tmp_path, caplog):
    in_ = tmp_path / 'a.pdf'
    out = tmp_path / 'b.pdf'
    pdf = pikepdf.new()
    pdf.save(in_)
    pdf.save(out)
    opts = make_opts(output_type='pdf')
    vd.report_output_file_size(opts, in_, out)
    assert caplog.text == ''
    caplog.clear()

    waste_of_space = b'Dummy' * 5000
    pdf.Root.Dummy = waste_of_space
    pdf.save(in_)
    pdf.Root.Dummy2 = waste_of_space + waste_of_space
    pdf.save(out)

    vd.report_output_file_size(opts, in_, out, ['The optional dependency...'])
    assert 'optional dependency' in caplog.text
    caplog.clear()

    vd.report_output_file_size(opts, in_, out, [])
    assert 'No reason' in caplog.text
    caplog.clear()

    opts = make_opts(in_, out, optimize=0, output_type='pdf')
    vd.report_output_file_size(opts, in_, out, ["Optimization was disabled."])
    assert 'disabled' in caplog.text
    caplog.clear()
17a5b8b43c6afe4455bb9baa436b6046186e5cd2
189
https://github.com/ocrmypdf/OCRmyPDF.git
154
def test_report_file_size(tmp_path, caplog): in_ = tmp_path / 'a.pdf' out = tmp_path / 'b.pdf' pdf = pikepdf.new() pdf.save(in_) pdf.save(out) opts = make_opts(output_type='pdf') vd.report_output_file_size(opts, in_, out) assert caplog.text == '' caplog.clear() waste_of_space = b'Dummy' * 5000 pdf.Root.Dummy = waste_of_space pdf.save(in_) p
21
311
test_report_file_size
4
0
1
3
wagtail/snippets/tests/test_snippets.py
79,632
Make all usage reports use the reference index
wagtail
9
Python
4
test_snippets.py
def setUpTestData(cls):
    super().setUpTestData()
    management.call_command("rebuild_references_index")
8691b199672c1b9406a5a5da220e48b0ca9198b6
18
https://github.com/wagtail/wagtail.git
17
def setUpTestData(cls):
5
34
setUpTestData
28
0
5
9
tests/integration/instrumentation/__init__.py
13,268
feat(instrumentation): add OpenTelemetry tracing and metrics with basic configurations (#5175)
jina
14
Python
24
__init__.py
def spans_with_error(spans):
    error_spans = []
    for span in spans:
        for tag in span['tags']:
            if 'otel.status_code' == tag.get('key', '') and 'ERROR' == tag.get(
                'value', ''
            ):
                error_spans.append(span)
    return error_spans
107631e955b21db8a4ddb3bee02130de3650d032
53
https://github.com/jina-ai/jina.git
95
def spans_with_error(spans): error_spans = [] for span in spans: for tag in span['tags']: if 'otel.status_code' == tag.get('key', '') and 'ERROR' == tag.get( 'value', '' ):
7
95
spans_with_error
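Illustrative input for the helper above (the span list is invented for this sketch, not taken from the test suite): Jaeger-style spans carry a list of key/value tags, and a span counts as failed when it has a tag otel.status_code == "ERROR".

spans = [
    {"spanID": "a1", "tags": [{"key": "otel.status_code", "value": "OK"}]},
    {"spanID": "b2", "tags": [{"key": "otel.status_code", "value": "ERROR"}]},
]

error_spans = []
for span in spans:
    for tag in span["tags"]:
        # same predicate as spans_with_error in the record above
        if tag.get("key", "") == "otel.status_code" and tag.get("value", "") == "ERROR":
            error_spans.append(span)

print([s["spanID"] for s in error_spans])  # ['b2']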
134
0
5
31
imagenet/main.py
82,859
If the dataset is not exactly divisible by world_size, the validation accuracy is incorrect. We solve this problem with an auxiliary validation set. (#980)
examples
13
Python
98
main.py
def train(train_loader, model, criterion, optimizer, epoch, args):
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(train_loader),
        [batch_time, data_time, losses, top1, top5],
        prefix="Epoch: [{}]".format(epoch))

    # switch to train mode
    model.train()

    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
        if torch.cuda.is_available():
            target = target.cuda(args.gpu, non_blocking=True)

        # compute output
        output = model(images)
        loss = criterion(output, target)

        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            progress.display(i + 1)
d5478765d38210addf474dd73faf0d103052027a
300
https://github.com/pytorch/examples.git
357
def train(train_loader, model, criterion, optimizer, epoch, args): batch_time = AverageMeter('Time', ':6.3f') data_time = AverageMeter('Data', ':6.3f') losses = AverageMeter('Loss', ':.4e') top1 = AverageMeter('Acc@1', ':6.2f') top5 = AverageMeter('Acc@5', ':6.2f') progress = ProgressMeter( len(train_loader), [batch_time, data_time, losses, top1, top5], prefix="Epoch: [{}]".format(epoch)) # switch to train mode model.train() end = time.time() for i, (images, target) in enumerate(train_loader): # measure data loading time data_time.update(time.time() - end) if args.gpu is not None: images = images.cuda(args.gpu, non_blocking=True) if torch.cuda.is_available(): target = target.cuda(args.gpu, non_blocking=True) # compute output output = model(images) loss = criterion(output, target) # measure accuracy and record loss acc1, acc5 = accuracy(output, target, topk=(1, 5)) losses.update(loss.item(), images.size(0)) top1.update(acc1[0], images.size(0)) top5.update(acc5[0], images.size(0)) # compute gradient and do SGD step optimizer.zero_grad() loss.backward() optimizer.step() # measure elapsed time batch_time.update(time.time() - end) en
43
486
train
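Sketch of the running-average bookkeeping the training loop above relies on. The real AverageMeter lives in the same example script; this minimal stand-in (name invented) only assumes the update(val, n)/avg interface that the record actually uses.

class RunningAverage:
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        # accumulate a per-batch average weighted by the batch size
        self.sum += val * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)

meter = RunningAverage()
meter.update(0.75, n=32)    # e.g. top-1 accuracy of a 32-image batch
meter.update(0.50, n=32)
print(round(meter.avg, 3))  # 0.625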
47
0
2
20
tests/integration_tests/test_visualization_api.py
7,867
Added conditional check for UNK token insertion into category feature vocab (#2429) * Added conditional check for UNK token * Fixing test failures * Fixing more tests * Fixing hyperopt test failures * Resolve issues with server related tests * Fix serving related failures * Fix last test * Added better logging and comments * Revert to old else case code for clarity * Bump fsspec
ludwig
14
Python
41
test_visualization_api.py
def test_roc_curves_vis_api(experiment_to_use):
    experiment = experiment_to_use
    probabilities = experiment.probabilities
    viz_outputs = ("pdf", "png")
    positive_label = 1
    with TemporaryDirectory() as tmpvizdir:
        for viz_output in viz_outputs:
            vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
            visualize.roc_curves(
                [probabilities, probabilities],
                experiment.ground_truth,
                experiment.ground_truth_metadata,
                experiment.output_feature_name,
                positive_label,
                model_names=["Model1", "Model2"],
                output_directory=tmpvizdir,
                file_format=viz_output,
            )
            figure_cnt = glob.glob(vis_output_pattern_pdf)
            assert 1 == len(figure_cnt)
5ba06c861fb8e01729af4185f1a056f09482cdee
98
https://github.com/ludwig-ai/ludwig.git
247
def test_roc_curves_vis_api(experiment_to_use): experiment = experiment_to_use probabilities = experiment.pr
21
162
test_roc_curves_vis_api
79
1
4
46
saleor/tests/fixtures.py
28,216
Fix ORM crash when generating hundreds of search vector in SQL (#10261) (#10282) This fixes a recursion error crash when generating hundreds of `SearchVector` for a single SQL update statement. Known issue: PostgreSQL may reject the statement when thousands of `SearchVector` are being generated with the following error (fixed by #10279): ``` django.db.utils.OperationalError: stack depth limit exceeded HINT: Increase the configuration parameter "max_stack_depth" (currently 2048kB), after ensuring the platform's stack depth limit is adequate. ```
saleor
12
Python
54
fixtures.py
def product_with_two_variants(product_type, category, warehouse, channel_USD):
    product = Product.objects.create(
        name="Test product with two variants",
        slug="test-product-with-two-variant",
        product_type=product_type,
        category=category,
    )

    ProductChannelListing.objects.create(
        product=product,
        channel=channel_USD,
        is_published=True,
        visible_in_listings=True,
        available_for_purchase_at=datetime.datetime(1999, 1, 1, tzinfo=pytz.UTC),
    )

    variants = [
        ProductVariant(
            product=product,
            sku=f"Product variant #{i}",
        )
        for i in (1, 2)
    ]
    ProductVariant.objects.bulk_create(variants)
    variants_channel_listing = [
        ProductVariantChannelListing(
            variant=variant,
            channel=channel_USD,
            price_amount=Decimal(10),
            cost_price_amount=Decimal(1),
            currency=channel_USD.currency_code,
        )
        for variant in variants
    ]
    ProductVariantChannelListing.objects.bulk_create(variants_channel_listing)
    Stock.objects.bulk_create(
        [
            Stock(
                warehouse=warehouse,
                product_variant=variant,
                quantity=10,
            )
            for variant in variants
        ]
    )

    product.search_vector = FlatConcat(*prepare_product_search_vector_value(product))
    product.save(update_fields=["search_vector"])

    return product


@pytest.fixture
43765a4c1fc029a529827dd86a2d1912ac4c98b6
@pytest.fixture
209
https://github.com/saleor/saleor.git
396
def product_with_two_variants(product_type, category, warehouse, channel_USD): product = Product.objects.create( name="Test product with two variants", slug="test-product-with-two-variant", product_type=product_type, category=category, ) ProductChannelListing.objects.create( product=product, channel=channel_USD, is_published=True, visible_in_listings=True, available_for_purchase_at=datetime.datetime(1999, 1, 1, tzinfo=pytz.UTC), ) variants = [ ProductVariant( product=product, sku=f"Product variant #{i}", ) for i in (1, 2) ] ProductVariant.objects
43
318
product_with_two_variants
64
0
1
31
tests/admin_inlines/tests.py
207,175
Refs #33476 -- Reformatted code with Black.
django
10
Python
47
tests.py
def test_inline_change_m2m_view_only_perm(self):
    permission = Permission.objects.get(
        codename="view_book", content_type=self.book_ct
    )
    self.user.user_permissions.add(permission)
    response = self.client.get(self.author_change_url)
    # View-only inlines.
    self.assertIs(
        response.context["inline_admin_formset"].has_view_permission, True
    )
    self.assertIs(
        response.context["inline_admin_formset"].has_add_permission, False
    )
    self.assertIs(
        response.context["inline_admin_formset"].has_change_permission, False
    )
    self.assertIs(
        response.context["inline_admin_formset"].has_delete_permission, False
    )
    self.assertContains(response, "<h2>Author-book relationships</h2>")
    self.assertContains(
        response,
        '<input type="hidden" name="Author_books-TOTAL_FORMS" value="1" '
        'id="id_Author_books-TOTAL_FORMS">',
        html=True,
    )
    # The field in the inline is read-only.
    self.assertContains(response, "<p>%s</p>" % self.book)
    self.assertNotContains(
        response,
        '<input type="checkbox" name="Author_books-0-DELETE" id="id_Author_books-0-DELETE">',
        html=True,
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
152
https://github.com/django/django.git
335
def test_inline_change_m2m_view_only_perm(self): permission = Permission.objects.get( codename="view_book", content_type=self.book_ct ) self.user.user_permissions.add(permission) response = self.client.get(self.author_change_url) # View-only inlines. self.assertIs( response.context["inline_admin_formset"].has_view_permission, True ) self.assertIs( response.context["inline_admin_formset"].has_add_permission, False ) self.assertIs( response.context["inline_admin_formset"].has_change_permission, False ) self.assertIs( response.context["inline_admin_formset"].has_delete_permission, False ) self.assertContains(response, "<h2>Author-book relationships</h2>") self.assertContains( response, '<input type="hidden" name="Author_books-TOTAL_FORMS" value="1" ' 'id=
25
249
test_inline_change_m2m_view_only_perm
35
0
3
8
label_studio/tasks/serializers.py
177,560
fix: DEV-1476: Resolving performance for project storages (#1910) * Fix: DEV-1476: Resolving performance for project storages * Rewrite cache * Remove cache completely
label-studio
13
Python
27
serializers.py
def to_representation(self, instance):
    project = self.project(instance)
    if project:
        # resolve uri for storage (s3/gcs/etc)
        if self.context.get('resolve_uri', False):
            instance.data = instance.resolve_uri(instance.data, project)

        # resolve $undefined$ key in task data
        data = instance.data
        replace_task_data_undefined_with_config_field(data, project)

    return super().to_representation(instance)
6293c3226e3713bdae678603d6c1300e09c41448
64
https://github.com/heartexlabs/label-studio.git
125
def to_representation(self, instance): project = self.project(instance) if project: # resolve uri for storage (s3/gcs/etc) if self.context.get('resolve_uri', False): instance.data = instance.resolve_uri(instanc
10
104
to_representation
16
0
2
6
label_studio/tasks/api.py
177,672
fix: DEV-1502: Improve /api/tasks (#1945) * fix: DEV-1502: Add project filter for tasks API * Use DM response type for api/tasks (DEV-1502) * Return all task model fields (DEV-1502) * Migrate to /api/tasks * Disable /api/dm/tasks/ (DEV-1502) * Fix tests (DEV-1502) * Update LSP. Add prefetch Co-authored-by: Nick <nr@fenelon.ru> Co-authored-by: makseq-ubnt <makseq@gmail.com>
label-studio
11
Python
13
api.py
def get_serializer_context(self):
    context = super().get_serializer_context()
    project_id = self.request.data.get('project')
    if project_id:
        context['project'] = generics.get_object_or_404(Project, pk=project_id)
    return context
4d3013c10f89eff8837bc6d7f45b2c1b1f0486e7
46
https://github.com/heartexlabs/label-studio.git
54
def get_serializer_context(self): context = super().get_serializer_context() project_id = self.request.data.get('project') if project_id: context['project'] = generics.get_object_or_404(Project, pk=project_id) return contex
12
78
get_serializer_context
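Sketch (assumptions noted): in Django REST Framework, whatever get_serializer_context returns is handed to the serializer as its context by the generic view, so a serializer used by the view above could read the resolved project as below. The serializer class and the project_id output field are invented for illustration.

from rest_framework import serializers

class TaskSerializerSketch(serializers.Serializer):
    def to_representation(self, instance):
        # 'project' is placed into the context by get_serializer_context()
        project = self.context.get("project")
        data = super().to_representation(instance)
        data["project_id"] = getattr(project, "id", None)
        return data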
139
0
1
59
test/test_components.py
180,143
textbox-autoheight (#1009) * textbox-autoheight - add max-lines to textbox * textbox-autoheight - reformat * textbox-autoheight - add demo * textbox-autoheight - tweaks on scripts * textbox-autoheight - fix tests * textbox-autoheight - fix tests * textbox-autoheight - fix tests * textbox-autoheight - convert default max_height from 100 to 20 * textbox-autoheight - convert default max_height from 100 to 20
gradio
12
Python
79
test_components.py
def test_component_functions(self):
    text_input = gr.Textbox()
    self.assertEqual(text_input.preprocess("Hello World!"), "Hello World!")
    self.assertEqual(text_input.preprocess_example("Hello World!"), "Hello World!")
    self.assertEqual(text_input.postprocess(None), None)
    self.assertEqual(text_input.postprocess("Ali"), "Ali")
    self.assertEqual(text_input.postprocess(2), "2")
    self.assertEqual(text_input.postprocess(2.14), "2.14")
    self.assertEqual(text_input.serialize("Hello World!", True), "Hello World!")
    with tempfile.TemporaryDirectory() as tmpdirname:
        to_save = text_input.save_flagged(
            tmpdirname, "text_input", "Hello World!", None
        )
        self.assertEqual(to_save, "Hello World!")
        restored = text_input.restore_flagged(tmpdirname, to_save, None)
        self.assertEqual(restored, "Hello World!")

    with self.assertWarns(DeprecationWarning):
        _ = gr.Textbox(type="number")

    self.assertEqual(
        text_input.tokenize("Hello World! Gradio speaking."),
        (
            ["Hello", "World!", "Gradio", "speaking."],
            [
                "World! Gradio speaking.",
                "Hello Gradio speaking.",
                "Hello World! speaking.",
                "Hello World! Gradio",
            ],
            None,
        ),
    )
    text_input.interpretation_replacement = "unknown"
    self.assertEqual(
        text_input.tokenize("Hello World! Gradio speaking."),
        (
            ["Hello", "World!", "Gradio", "speaking."],
            [
                "unknown World! Gradio speaking.",
                "Hello unknown Gradio speaking.",
                "Hello World! unknown speaking.",
                "Hello World! Gradio unknown",
            ],
            None,
        ),
    )
    self.assertEqual(
        text_input.get_template_context(),
        {
            "lines": 1,
            "max_lines": 20,
            "placeholder": None,
            "default_value": "",
            "name": "textbox",
            "label": None,
            "css": {},
            "interactive": None,
        },
    )
    self.assertIsInstance(text_input.generate_sample(), str)
9cd4c3121fc351da57491594279c6d3abbb45482
317
https://github.com/gradio-app/gradio.git
844
def test_component_functions(self): text_input = gr.Textbox() self.assertEqual(text_input.preprocess("Hello World!"), "Hello World!") self.assertEqual(text_input.preprocess_example("Hello World!"), "Hello World!") self.assertEqual(text_input.postprocess(None), None) self.assertEqual(text_input.postprocess("Ali"), "Ali") self.assertEqual(text_input.postprocess(2), "2") self.assertEqual(text_input.postprocess(2.14), "2.14") self.assertEqual(text_input.serialize("Hello World!", True), "Hello World!") with tempfile.TemporaryDirectory() as tmpdirname: to_save = text_input.save_flagged( tmpdirname, "text_input", "Hello World!", None ) self.assertEqual(to_save, "Hello World!") restored = text_input.restore_flagged(tmpdirname, to_save, None) self.assertEqual(restored, "Hello World!") with self.assertWarns(DeprecationWarning): _ = gr.Textbox(type="number") self.assertEqual( text_input.tokenize("Hello World! Gradio speaking."), ( ["Hello", "World!", "Gradio", "speaking."], [ "World! Gradio speaking.", "Hello Gradio speaking.", "Hello World! speaking.", "Hello World! Gradio", ], None, ), ) text_input.interpretation_replacement = "unknown" self.assertEqual( text_input.tokenize("Hello World! Gradio speaking."), ( ["Hello", "World!", "Gradio", "speaking."], [ "unknown World! Gradio speaking.", "Hello unknown Gradio speaking.", "Hello World! unknown speaking.", "Hello World! Gradio unknown", ], None, ), ) self.assertEqual( text_input.get_template_context(), { "lines": 1, "max_lines": 20, "placeholder": None, "default_val
27
551
test_component_functions
24
0
1
8
tests/components/risco/test_binary_sensor.py
289,816
Add alarmed binary sensor to Risco integration (#77315) Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
core
9
Python
17
test_binary_sensor.py
async def test_error_on_connect(hass, connect_with_error, local_config_entry):
    await hass.config_entries.async_setup(local_config_entry.entry_id)
    await hass.async_block_till_done()
    registry = er.async_get(hass)
    assert not registry.async_is_registered(FIRST_ENTITY_ID)
    assert not registry.async_is_registered(SECOND_ENTITY_ID)
    assert not registry.async_is_registered(FIRST_ALARMED_ENTITY_ID)
    assert not registry.async_is_registered(SECOND_ALARMED_ENTITY_ID)
64eb316908f26c023d7f787b3d655c968e08cdad
67
https://github.com/home-assistant/core.git
48
async def test_error_on_connect(hass, connect_with_error, local_config_entry): await hass.config_entries.async_setup(local_config_entry.entry_id) await hass.async_block_till_done() registry = er.async_get(hass) assert not registry.async_is_registered(FIRST_ENTITY_ID) assert not registry.async_is_registered(SECOND_ENTITY_ID) assert not registry.async_is_registered(FIRST_ALARMED_ENTITY_ID) assert not registry.async_is_registered(SECOND_ALARMED_ENTITY_ID)
16
112
test_error_on_connect
34
0
1
17
components/dash-table/tests/selenium/test_markdown.py
40,100
:hocho: deprecated find_element(s)_by_css_selector
dash
14
Python
20
test_markdown.py
def test_mark002_emphasized_text(test):
    test.start_server(get_app())

    target = test.table("table")

    target.column(1).sort(1)
    assert (
        target.cell(0, "markdown-italics")
        .find_inside(".dash-cell-value > p > em")
        .get_attribute("innerHTML")
        == "1"
    )

    target.column(1).sort(1)
    assert (
        target.cell(0, "markdown-italics")
        .find_inside(".dash-cell-value > p > em")
        .get_attribute("innerHTML")
        == "98"
    )
5dfa6b0782803cb0635119ee1dcf8775dd76c8a7
89
https://github.com/plotly/dash.git
113
def test_mark002_emphasized_text(test): test.start_server(get_app()) target = test.table("table") target.column(1).sort(1) assert ( target.cell(0, "markdown-italics") .find_inside(".dash-cell-value > p > em") .get_attribute("innerHTML") == "1" ) target.column(1).sort(1) assert ( target.cell(0, "markdown-italics") .find_inside(".dash-cell-value > p > em") .get_attribute("innerHTML") == "98" )
11
160
test_mark002_emphasized_text
107
0
1
37
wagtail/core/tests/test_page_model.py
74,313
Reformat with black
wagtail
13
Python
74
test_page_model.py
def test_update_aliases(self):
    event_page = EventPage.objects.get(url_path="/home/events/christmas/")
    alias = event_page.create_alias(update_slug="new-event-page")
    alias_alias = alias.create_alias(update_slug="new-event-page-2")

    # Update the title and add a speaker
    event_page.title = "Updated title"
    event_page.draft_title = "A different draft title"
    event_page.speakers.add(
        EventPageSpeaker(
            first_name="Ted",
            last_name="Crilly",
        )
    )
    event_page.save()

    # Nothing should've happened yet
    alias.refresh_from_db()
    alias_alias.refresh_from_db()
    self.assertEqual(alias.title, "Christmas")
    self.assertEqual(alias_alias.title, "Christmas")
    self.assertEqual(alias.speakers.count(), 1)
    self.assertEqual(alias_alias.speakers.count(), 1)

    PageLogEntry.objects.all().delete()

    event_page.update_aliases()

    # Check that the aliases have been updated
    alias.refresh_from_db()
    alias_alias.refresh_from_db()
    self.assertEqual(alias.title, "Updated title")
    self.assertEqual(alias_alias.title, "Updated title")
    self.assertEqual(alias.speakers.count(), 2)
    self.assertEqual(alias_alias.speakers.count(), 2)

    # Draft titles shouldn't update as alias pages do not have drafts
    self.assertEqual(alias.draft_title, "Updated title")
    self.assertEqual(alias_alias.draft_title, "Updated title")

    # Check log entries were created
    self.assertTrue(
        PageLogEntry.objects.filter(page=alias, action="wagtail.publish").exists()
    )
    self.assertTrue(
        PageLogEntry.objects.filter(
            page=alias_alias, action="wagtail.publish"
        ).exists()
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
268
https://github.com/wagtail/wagtail.git
437
def test_update_aliases(self): event_page = EventPage.objects.get(url_path="/home/events/christmas/") alias = event_page.create_alias(update_slug="new-event-page") alias_alias = alias.create_alias(update_slug="new-event-page-2") # Update the title and add a speaker event_page.title = "Updated title" event_page.draft_title = "A different draft title" event_page.speakers.add( EventPageSpeaker( first_name="Ted", last_name="Crilly", ) ) event_page.save() # Nothing should've happened yet alias.refresh_from_db() alias_alias.refresh_from_db() self.assertEqual(alias.title, "Christmas") self.assertEqual(alias_alias.title, "Christmas") self.assertEqual(alias.speakers.count(), 1) self.assertEqual(alias_alias.speakers.count(), 1) PageLogEntry.objects.all().delete() event_page.update_aliases() # Check that the aliases have been updated alias.refresh_from_db() alias_alias.refresh_from_db() self.assertEqual(alias.title, "Updated title") self.assertEqual(alias_alias.title, "Updated title") self.assertEqual(alias.speakers.count(), 2) self.assertEqual(alias_alias.speakers.count(), 2) # Draft titles shouldn't update as alias pages do not have drafts self.assertEqual(alias.draft_title, "Updated title") self.assertEqual(alias_alias.draft_title, "Updated title") # Check log entries were created self.assertTrue( PageLogEntry.objects.filter(page=alias, action="wagtail.publish").exists() ) self.assertTrue( PageLogEntry.ob
31
461
test_update_aliases
15
0
1
6
tests/test_datasets/test_coco.py
244,446
Add Transforms
mmdetection
13
Python
15
test_coco.py
def test_coco_dataset_without_filter_cfg(self):
    # test CocoDataset without filter_cfg
    dataset = CocoDataset(
        data_prefix=dict(img='imgs'),
        ann_file='tests/data/coco_sample.json',
        pipeline=[])
    self.assertEqual(len(dataset), 2)
2cc631f7656258dec0d12bcce459f5fe3f781b68
38
https://github.com/open-mmlab/mmdetection.git
68
def test_coco_dataset_without_filter_cfg(self): # test CocoDataset without filter_cfg dataset = CocoDataset( data_prefix=dict(img='imgs'), ann_file='tests/data/coco_sample.json', pipeline=[]) self.assertEqual(len(dataset),
11
64
test_coco_dataset_without_filter_cfg
30
0
2
7
jax/experimental/pjit.py
122,054
Implement pjit fast path in cpp for jax.Array inputs PiperOrigin-RevId: 475988677
jax
9
Python
27
pjit.py
def _python_pjit_helper(infer_params, *args, **kwargs):
  args_flat, _, params, _, out_tree, _ = infer_params(*args, **kwargs)
  for arg in args_flat:
    _check_arg(arg)
  out_flat = pjit_p.bind(*args_flat, **params)
  outs = tree_unflatten(out_tree, out_flat)
  return outs, out_flat, out_tree
405a2310ce2db325a05ba292944ec1a23e463b6c
66
https://github.com/google/jax.git
37
def _python_pjit_helper(infer_params, *args, **kwargs): args_flat, _, params, _, out_tree, _ = infer_params(*args, **kwargs) for arg in args_flat: _check_arg(arg) out_flat = pjit_p.bind(*args_flat, **p
15
97
_python_pjit_helper
37
1
1
7
tests/unit/bokeh/plotting/test_contour.py
212,429
Add contouring (#12020) * Contour demonstration * Use MultiLine with nans for contour lines * Add ContourRenderer * Try out ContourRenderer.data idea * Support different ways of specifying palettes * Contour ColorBar * Line, fill and hatch visuals on ContourColorBar * Refactor color bar classes * Use contour levels in color bar * Horizontal contour color bar * Support rendering just lines or just fill * figure.contour function * Contour level validation * Add tests, typing, docstrings * Fix codebase errors * Minimal test deps * Fix different python and js defaults for ContourRenderer * Address review comments * More review comments addressed * Fix ContourRenderer defaults and contour dataclass to dict * Update python unit tests to use dataclasses * isort fix
bokeh
10
Python
27
test_contour.py
def test_contour_colorbar(xyz_levels):
    x, y, z, levels = xyz_levels
    cr = from_contour(x, y, z, levels, fill_color="red", line_color="black")
    color_bar = cr.construct_color_bar()
    assert color_bar.levels == levels
    assert color_bar.fill_renderer == cr.fill_renderer
    assert color_bar.line_renderer == cr.line_renderer

#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------

@pytest.fixture
f0ea9eefccb1a1cc9b072dec7512916591eba88c
@pytest.fixture
63
https://github.com/bokeh/bokeh.git
50
def test_contour_colorbar(xyz_levels): x, y, z, levels = xyz_levels cr = from_contour(x, y, z, levels, fill_color="red", line_color="black") color_bar = cr.construct_color_bar() assert color_bar.levels == levels assert color_bar.fill_renderer == cr.fill_renderer assert color_bar.line_renderer
16
106
test_contour_colorbar
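Sketch built only from the calls visible in the record above (from_contour, construct_color_bar); the grid shape and level values are invented, and the import path is assumed to match what the test module uses.

import numpy as np
from bokeh.plotting.contour import from_contour  # assumed import path

# small synthetic grid and evenly spaced contour levels
x, y = np.meshgrid(np.linspace(0, 1, 20), np.linspace(0, 1, 20))
z = np.sin(6 * x) * np.cos(6 * y)
levels = [-1.0, -0.5, 0.0, 0.5, 1.0]

cr = from_contour(x, y, z, levels, fill_color="red", line_color="black")
color_bar = cr.construct_color_bar()  # shares the renderer's levels and glyphs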
93
1
9
38
erpnext/manufacturing/doctype/production_plan/production_plan.py
66,418
style: format code with black
erpnext
13
Python
59
production_plan.py
def get_sales_orders(self):
    so_filter = item_filter = ""
    bom_item = "bom.item = so_item.item_code"

    date_field_mapper = {
        "from_date": (">=", "so.transaction_date"),
        "to_date": ("<=", "so.transaction_date"),
        "from_delivery_date": (">=", "so_item.delivery_date"),
        "to_delivery_date": ("<=", "so_item.delivery_date"),
    }

    for field, value in date_field_mapper.items():
        if self.get(field):
            so_filter += f" and {value[1]} {value[0]} %({field})s"

    for field in ["customer", "project", "sales_order_status"]:
        if self.get(field):
            so_field = "status" if field == "sales_order_status" else field
            so_filter += f" and so.{so_field} = %({field})s"

    if self.item_code and frappe.db.exists("Item", self.item_code):
        bom_item = self.get_bom_item() or bom_item
        item_filter += " and so_item.item_code = %(item_code)s"

    open_so = frappe.db.sql(
        f,
        self.as_dict(),
        as_dict=1,
    )

    return open_so


@frappe.whitelist()
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist()
158
https://github.com/frappe/erpnext.git
67
def get_sales_orders(self): so_filter = item_filter = "" bom_item = "bom.item = so_item.item_code" date_field_mapper = { "from_date"
20
329
get_sales_orders
50
0
2
13
t/integration/test_canvas.py
208,083
Canvas Header Stamping (#7384) * Strip down the header-stamping PR to the basics. * Serialize groups. * Add groups to result backend meta data. * Fix spelling mistake. * Revert changes to canvas.py * Revert changes to app/base.py * Add stamping implementation to canvas.py * Send task to AMQP with groups. * Successfully pass single group to result. * _freeze_gid dict merge fixed * First draft of the visitor API. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * OptionsVisitor created * Fixed canvas.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test for simple test for chord and fixed chord implementation * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed lint and elements * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. * Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type -> isinstance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Redo header stamping (#7341) * _freeze_gid dict merge fixed * OptionsVisitor created * Fixed canvas.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test for simple test for chord and fixed chord implementation * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed lint and elements * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. 
* Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type -> isinstance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Omer Katz <omer.katz@omerkatz.com> * Added stamping mechanism * Manual stamping improved * flake8 fixed * Added subtests * Add comma. * Moved groups to stamps * Fixed chord and added test for that * Strip down the header-stamping PR to the basics. * Serialize groups. * Add groups to result backend meta data. * Fix spelling mistake. * Revert changes to canvas.py * Revert changes to app/base.py * Add stamping implementation to canvas.py * Send task to AMQP with groups. * Successfully pass single group to result. * _freeze_gid dict merge fixed * First draft of the visitor API. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * OptionsVisitor created * Fixed canvas.py * Added test for simple test for chord and fixed chord implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * Fixed lint and elements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. * Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * type -> isinstance * Added stamping mechanism * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Manual stamping improved * fail_ci_if_error uncommented * flake8 fixed * Added subtests * Changes * Add comma. * Fixed chord and added test for that * canvas.py fixed * Test chord.py fixed * Fixed stamped_headers * collections import fixed * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * collections import fixed * Update celery/backends/base.py Co-authored-by: Omer Katz <omer.katz@omerkatz.com> * ampq.py fixed * Refrain from using deprecated import path. * Fix test_complex_chain regression. Whenever we stamp a group we need to freeze it first if it wasn't already frozen. Somewhere along the line, the group id changed because we were freezing twice. This commit places the stamping operation after preparing the chain's steps which fixes the problem somehow. We don't know why yet. 
* Fixed integration tests * Fixed integration tests * Fixed integration tests * Fixed integration tests * Fixed issues with maybe_list. Add documentation * Fixed potential issue with integration tests * Fixed issues with _regen * Fixed issues with _regen * Fixed test_generator issues * Fixed _regen stamping * Fixed _regen stamping * Fixed TimeOut issue * Fixed TimeOut issue * Fixed TimeOut issue * Update docs/userguide/canvas.rst Co-authored-by: Omer Katz <omer.katz@omerkatz.com> * Fixed Couchbase * Better stamping intro * New GroupVisitor example * Adjust documentation. Co-authored-by: Naomi Elstein <naomi.els@omerkatz.com> Co-authored-by: Omer Katz <omer.katz@omerkatz.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Asif Saif Uddin <auvipy@gmail.com> Co-authored-by: Omer Katz <omer.katz@kcg.tech>
celery
12
Python
45
test_canvas.py
def test_nested_group_chord_counting_chord(self, manager):
    try:
        manager.app.backend.ensure_chords_allowed()
    except NotImplementedError as e:
        raise pytest.skip(e.args[0])

    gchild_count = 42
    gchild_sig = chord(
        (identity.si(1337),) * gchild_count,
        identity.si(31337),
    )
    child_chord = chord((gchild_sig,), identity.s())
    group_sig = group((child_chord,))
    res = group_sig.delay()
    # Wait for the result to land and confirm its value is as expected
    assert res.get(timeout=TIMEOUT) == [[31337]]
1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc
108
https://github.com/celery/celery.git
152
def test_nested_group_chord_counting_chord(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) gchild_count = 42 gchild_sig = chord( (identity.si(1337),) * gchild_count, identity.si(31337), ) child_chord = chord((gchild_sig,), identity.s()) group_sig = group((child_chord,)) res = group_sig.delay() # Wait for the result to land and confirm its value is as expected assert
25
171
test_nested_group_chord_counting_chord
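Sketch of the canvas shape the test above builds, using Celery's public primitives. The task names tasks.add and tasks.tsum are invented stand-ins for the integration suite's identity tasks.

from celery import chord, group, signature

header = (signature("tasks.add", args=(1, 1)),) * 3  # three parallel header tasks
body = signature("tasks.tsum")                        # callback over the header results
nested = chord(header, body)                          # the chord being counted
wrapped = group((nested,))                            # group wrapping the chord
# wrapped.delay() would return a GroupResult whose single entry resolves to the
# chord callback's value, mirroring res.get() == [[31337]] in the record above.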
13
0
1
4
tests/test_serializers.py
59,157
Remove deep serialization from `PickleSerializer` and add tests (#7044)
prefect
10
Python
12
test_serializers.py
def test_simple_roundtrip_with_builtin_pickle(self, data):
    serializer = PickleSerializer(picklelib="pickle")
    serialized = serializer.dumps(data)
    assert serializer.loads(serialized) == data
7092f0403a97154d3c3909e3fcd95e7db5776246
32
https://github.com/PrefectHQ/prefect.git
33
def test_simple_roundtrip_with_builtin_pickle(self, data): serializer = PickleSerializer(picklelib="pickle") serialized = serializer.du
9
53
test_simple_roundtrip_with_builtin_pickle
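Sketch (not the library's implementation): the plain-pickle roundtrip the test asserts, expressed with the standard library only; the sample payload is invented.

import pickle

data = {"a": [1, 2, 3], "b": "prefect"}
blob = pickle.dumps(data)           # bytes produced by the builtin picklelib
assert pickle.loads(blob) == data   # lossless roundtrip, as the test expects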
30
0
3
7
rllib/models/torch/torch_action_dist.py
143,802
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
15
Python
24
torch_action_dist.py
def deterministic_sample(self) -> TensorType:
    arr = [torch.argmax(cat.probs, -1) for cat in self.cats]
    sample_ = torch.stack(arr, dim=1)
    if isinstance(self.action_space, gym.spaces.Box):
        sample_ = torch.reshape(sample_, [-1] + list(self.action_space.shape))
    self.last_sample = sample_
    return sample_
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
83
https://github.com/ray-project/ray.git
75
def deterministic_sample(self) -> TensorType: arr = [torch.argmax(cat.probs, -1) for cat in self.cats]
21
127
deterministic_sample
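Sketch (inputs invented): the per-dimension argmax that deterministic_sample performs for a multi-categorical distribution, shown with plain torch and explicit probability tensors instead of the distribution objects.

import torch

# two sub-distributions over 3 and 4 classes, batch of 2
probs_a = torch.tensor([[0.1, 0.7, 0.2], [0.5, 0.3, 0.2]])
probs_b = torch.tensor([[0.1, 0.2, 0.3, 0.4], [0.25, 0.25, 0.25, 0.25]])

arr = [torch.argmax(p, -1) for p in (probs_a, probs_b)]  # greedy class per dim
sample_ = torch.stack(arr, dim=1)                        # shape (batch, num_dims)
print(sample_)  # tensor([[1, 3], [0, 0]])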
27
0
1
20
test/test_pipeline_yaml.py
257,267
Validate YAML files without loading the nodes (#2438) * Remove BasePipeline and make a module for RayPipeline * Can load pipelines from yaml, plenty of issues left * Extract graph validation logic into _add_node_to_pipeline_graph & refactor load_from_config and add_node to use it * Fix pipeline tests * Move some tests out of test_pipeline.py and create MockDenseRetriever * myoy and pylint (silencing too-many-public-methods) * Fix issue found in some yaml files and in schema files * Fix paths to YAML and fix some typos in Ray * Fix eval tests * Simplify MockDenseRetriever * Fix Ray test * Accidentally pushed merge coinflict, fixed * Typo in schemas * Typo in _json_schema.py * Slightly reduce noisyness of version validation warnings * Fix version logs tests * Fix version logs tests again * remove seemingly unused file * Add check and test to avoid adding the same node to the pipeline twice * Update Documentation & Code Style * Revert config to pipeline_config * Remo0ve unused import * Complete reverting to pipeline_config * Some more stray config= * Update Documentation & Code Style * Feedback * Move back other_nodes tests into pipeline tests temporarily * Update Documentation & Code Style * Fixing tests * Update Documentation & Code Style * Fixing ray and standard pipeline tests * Rename colliding load() methods in dense retrievers and faiss * Update Documentation & Code Style * Fix mypy on ray.py as well * Add check for no root node * Fix tests to use load_from_directory and load_index * Try to workaround the disabled add_node of RayPipeline * Update Documentation & Code Style * Fix Ray test * Fix FAISS tests * Relax class check in _add_node_to_pipeline_graph * Update Documentation & Code Style * Try to fix mypy in ray.py * unused import * Try another fix for Ray * Fix connector tests * Update Documentation & Code Style * Fix ray * Update Documentation & Code Style * use BaseComponent.load() in pipelines/base.py * another round of feedback * stray BaseComponent.load() * Update Documentation & Code Style * Fix FAISS tests too Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: tstadel <60758086+tstadel@users.noreply.github.com>
haystack
12
Python
22
test_pipeline_yaml.py
def test_load_yaml_incompatible_version(tmp_path, caplog):
    with open(tmp_path / "tmp_config.yml", "w") as tmp_file:
        tmp_file.write(
        )

    with caplog.at_level(logging.WARNING):
        Pipeline.load_from_yaml(path=tmp_path / "tmp_config.yml")
        assert "version '1.1.0'" in caplog.text
        assert f"Haystack {haystack.__version__}" in caplog.text
f8e02310bf0dfbd1ab79a1c3c73434e0aeba4f4b
58
https://github.com/deepset-ai/haystack.git
79
def test_load_yaml_incompatible_version(tmp_path, caplog): with open(tmp_path / "tmp_config.yml", "w") as tmp_file: tmp_file.write( ) with caplog.at_level(logging.WARNING): Pipeline.load_from_yaml(path=tmp_path / "tmp_config.yml") assert "version '1.1.0'" in caplog.text
15
113
test_load_yaml_incompatible_version
1,945
0
1
10
python3.10.4/Lib/encodings/iso8859_4.py
217,040
add python 3.10.4 for windows
XX-Net
11
Python
677
iso8859_4.py
def getregentry(): return codecs.CodecInfo( name='iso8859-4', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x04' # 0x04 -> END OF TRANSMISSION '\x05' # 0x05 -> ENQUIRY '\x06' # 0x06 -> ACKNOWLEDGE '\x07' # 0x07 -> BELL '\x08' # 0x08 -> BACKSPACE '\t' # 0x09 -> HORIZONTAL TABULATION '\n' # 0x0A -> LINE FEED '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x14' # 0x14 -> DEVICE CONTROL FOUR '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x16 -> SYNCHRONOUS IDLE '\x17' # 0x17 -> END OF TRANSMISSION BLOCK '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x1a' # 0x1A -> SUBSTITUTE '\x1b' # 0x1B -> ESCAPE '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' # 0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> UNIT SEPARATOR ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' 
# 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> DELETE '\x80' # 0x80 -> <control> '\x81' # 0x81 -> <control> '\x82' # 0x82 -> <control> '\x83' # 0x83 -> <control> '\x84' # 0x84 -> <control> '\x85' # 0x85 -> <control> '\x86' # 0x86 -> <control> '\x87' # 0x87 -> <control> '\x88' # 0x88 -> <control> '\x89' # 0x89 -> <control> '\x8a' # 0x8A -> <control> '\x8b' # 0x8B -> <control> '\x8c' # 0x8C -> <control> '\x8d' # 0x8D -> <control> '\x8e' # 0x8E -> <control> '\x8f' # 0x8F -> <control> '\x90' # 0x90 -> <control> '\x91' # 0x91 -> <control> '\x92' # 0x92 -> <control> '\x93' # 0x93 -> <control> '\x94' # 0x94 -> <control> '\x95' # 0x95 -> <control> '\x96' # 0x96 -> <control> '\x97' # 0x97 -> <control> '\x98' # 0x98 -> <control> '\x99' # 0x99 -> <control> '\x9a' # 0x9A -> <control> '\x9b' # 0x9B -> <control> '\x9c' # 0x9C -> <control> '\x9d' # 0x9D -> <control> '\x9e' # 0x9E -> <control> '\x9f' # 0x9F -> <control> '\xa0' # 0xA0 -> NO-BREAK SPACE '\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK '\u0138' # 0xA2 -> LATIN SMALL LETTER KRA '\u0156' # 0xA3 -> LATIN CAPITAL LETTER R WITH CEDILLA '\xa4' # 0xA4 -> CURRENCY SIGN '\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE '\u013b' # 0xA6 -> LATIN CAPITAL LETTER L WITH CEDILLA '\xa7' # 0xA7 -> SECTION SIGN '\xa8' # 0xA8 -> DIAERESIS '\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON '\u0112' # 
0xAA -> LATIN CAPITAL LETTER E WITH MACRON '\u0122' # 0xAB -> LATIN CAPITAL LETTER G WITH CEDILLA '\u0166' # 0xAC -> LATIN CAPITAL LETTER T WITH STROKE '\xad' # 0xAD -> SOFT HYPHEN '\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON '\xaf' # 0xAF -> MACRON '\xb0' # 0xB0 -> DEGREE SIGN '\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK '\u02db' # 0xB2 -> OGONEK '\u0157' # 0xB3 -> LATIN SMALL LETTER R WITH CEDILLA '\xb4' # 0xB4 -> ACUTE ACCENT '\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE '\u013c' # 0xB6 -> LATIN SMALL LETTER L WITH CEDILLA '\u02c7' # 0xB7 -> CARON '\xb8' # 0xB8 -> CEDILLA '\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON '\u0113' # 0xBA -> LATIN SMALL LETTER E WITH MACRON '\u0123' # 0xBB -> LATIN SMALL LETTER G WITH CEDILLA '\u0167' # 0xBC -> LATIN SMALL LETTER T WITH STROKE '\u014a' # 0xBD -> LATIN CAPITAL LETTER ENG '\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON '\u014b' # 0xBF -> LATIN SMALL LETTER ENG '\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE '\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK '\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE '\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS '\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\u012a' # 0xCF -> LATIN CAPITAL LETTER I WITH MACRON '\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE '\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA '\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON '\u0136' # 0xD3 -> LATIN CAPITAL LETTER K WITH CEDILLA '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xd7' # 0xD7 -> MULTIPLICATION SIGN '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE '\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS '\u0168' # 0xDD -> LATIN CAPITAL LETTER U WITH TILDE '\u016a' # 0xDE -> LATIN CAPITAL LETTER U WITH MACRON '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S '\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE '\xe6' # 0xE6 -> LATIN SMALL LETTER AE '\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK '\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE '\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS '\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\u012b' # 0xEF -> LATIN SMALL LETTER I WITH MACRON '\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE '\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA 
'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON '\u0137' # 0xF3 -> LATIN SMALL LETTER K WITH CEDILLA '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS '\xf7' # 0xF7 -> DIVISION SIGN '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE '\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS '\u0169' # 0xFD -> LATIN SMALL LETTER U WITH TILDE '\u016b' # 0xFE -> LATIN SMALL LETTER U WITH MACRON '\u02d9' # 0xFF -> DOT ABOVE ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
8198943edd73a363c266633e1aa5b2a9e9c9f526
46
https://github.com/XX-net/XX-Net.git
4,232
def getregentry(): return codecs.CodecInfo( name='iso8859-4', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x04' # 0x04 -> END OF TRANSMISSION '\x05' # 0x05 -> ENQUIRY '\x06' # 0x06 -> ACKNOWLEDGE '\x07' # 0x07 -> BELL '\x08' # 0x08 -> BACKSPACE '\t' # 0x09 -> HORIZONTAL TABULATION '\n' # 0x0A -> LINE FEED '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x14' # 0x14 -> DEVICE CONTROL FOUR '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x16 -> SYNCHRONOUS IDLE '\x17' # 0x17 -> END OF TRANSMISSION BLOCK '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x1a' # 0x1A -> SUBSTITUTE '\x1b' # 0x1B -> ESCAPE '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' # 0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> UNIT SEPARATOR ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' 
# 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> DELETE '\x80' # 0x80 -> <control> '\x81' # 0x81 -> <control> '\x82' # 0x82 -> <control> '\x83' # 0x83 -> <control> '\x84' # 0x84 -> <control> '\x85' # 0x85 -> <control> '\x86' # 0x86 -> <control> '\x87' # 0x87 -> <control> '\x88' # 0x88 -> <control> '\x89' # 0x89 -> <control> '\x8a' # 0x8A -> <control> '\x8b' # 0x8B -> <control> '\x8c' # 0x8C -> <control> '\x8d' # 0x8D -> <control> '\x8e' # 0x8E -> <control> '\x8f' # 0x8F -> <control> '\x90' # 0x90 -> <control> '\x91' # 0x91 -> <control> '\x92' # 0x92 -> <control> '\x93' # 0x93 -> <control> '\x94' # 0x94 -> <control> '\x95' # 0x95 -> <control> '\x96' # 0x96 -> <control> '\x97' # 0x97 -> <control> '\x98' # 0x98 -> <control> '\x99' # 0x99 -> <control> '\x9a' # 0x9A -> <control> '\x9b' # 0x9B -> <control> '\x9c' # 0x9C -> <control> '\x9d' # 0x9D -> <control> '\x9e' # 0x9E -> <control> '\x9f' # 0x9F -> <control> '\xa0' # 0xA0 -> NO-BREAK SPACE '\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK '\u0138' # 0xA2 -> LATIN SMALL LETTER KRA '\u0156' # 0xA3 -> LATIN CAPITAL LETTER R WITH CEDILLA '\xa4' # 0xA4 -> CURRENCY SIGN '\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE '\u013b' # 0xA6 -> LATIN CAPITAL LETTER L WITH CEDILLA '\xa7' # 0xA7 -> SECTION SIGN '\xa8' # 0xA8 -> DIAERESIS '\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON '\u0112' # 
0xAA -> LATIN CAPITAL LETTER E WITH MACRON '\u0122' # 0xAB -> LATIN CAPITAL LETTER G WITH CEDILLA '\u0166' # 0xAC -> LATIN CAPITAL LETTER T WITH STROKE '\xad' # 0xAD -> SOFT HYPHEN '\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON '\xaf' # 0xAF -> MACRON '\xb0' # 0xB0 -> DEGREE SIGN '\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK '\u02db' # 0xB2 -> OGONEK '\u0157' # 0xB3 -> LATIN SMALL LETTER R WITH CEDILLA '\xb4' # 0xB4 -> ACUTE ACCENT '\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE '\u013c' # 0xB6 -> LATIN SMALL LETTER L WITH CEDILLA '\u02c7' # 0xB7 -> CARON '\xb8' # 0xB8 -> CEDILLA '\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON '\u0113' # 0xBA -> LATIN SMALL LETTER E WITH MACRON '\u0123' # 0xBB -> LATIN SMALL LETTER G WITH CEDILLA '\u0167' # 0xBC -> LATIN SMALL LETTER T WITH STROKE '\u014a' # 0xBD -> LATIN CAPITAL LETTER ENG '\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON '\u014b' # 0xBF -> LATIN SMALL LETTER ENG '\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE '\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK '\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE '\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS '\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\u012a' # 0xCF -> LATIN CAPITAL LETTER I WITH MACRON '\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE '\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA '\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON '\u0136' # 0xD3 -> LATIN CAPITAL LETTER K WITH CEDILLA '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xd7' # 0xD7 -> MULTIPLICATION SIGN '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE '\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS '\u0168' # 0xDD -> LATIN CAPITAL LETTER U WITH TILDE '\u016a' # 0xDE -> LATIN CAPITAL LETTER U WITH MACRON '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S '\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE '\xe6' # 0xE6 -> LATIN SMALL LETTER AE '\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK '\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH
18
1,278
getregentry
33
0
3
13
pipenv/utils/spinner.py
21,137
Removed usage of fs_str from vistir (#5062) * Removed usage of fs_str from vistir This function was all about compatability of py2-py3.3 versions. Later versions don't need it. * Explicitly convert dict values to strings * Add news fragment
pipenv
11
Python
30
spinner.py
def create_spinner(text, setting, nospin=None, spinner_name=None):
    from pipenv.vendor.vistir import spin
    if not spinner_name:
        spinner_name = setting.PIPENV_SPINNER
    if nospin is None:
        nospin = setting.PIPENV_NOSPIN
    with spin.create_spinner(
        spinner_name=spinner_name,
        start_text=text,
        nospin=nospin,
        write_to_stdout=False,
    ) as sp:
        yield sp
2bf70b74167868133809a926aa6393438fb06db4
69
https://github.com/pypa/pipenv.git
96
def create_spinner(text, setting, nospin=None, spinner_name=None): from pipenv.vendor.vistir import spin if not spinner_name: spinner_name = setting.PIPENV_SPINNER if nospin is None: nospin = setting.PIPENV_NOSPIN with spin.create_spinner( spinner_name=spinner_name, start_text=text,
14
105
create_spinner
28
0
1
8
mmdet/models/task_modules/coders/tblr_bbox_coder.py
245,724
[Refactor] Refactor anchor head and base head with boxlist (#8625) * Refactor anchor head * Update * Update * Update * Add a series of boxes tools * Fix box type to support n x box_dim boxes * revert box type changes * Add docstring * refactor retina_head * Update * Update * Fix comments * modify docstring of coder and ioucalculator * Replace with_boxlist with use_box_type
mmdetection
10
Python
21
tblr_bbox_coder.py
def encode(self, bboxes, gt_bboxes):
    bboxes = get_box_tensor(bboxes)
    gt_bboxes = get_box_tensor(gt_bboxes)
    assert bboxes.size(0) == gt_bboxes.size(0)
    assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
    encoded_bboxes = bboxes2tblr(
        bboxes, gt_bboxes, normalizer=self.normalizer)
    return encoded_bboxes
d915740fa8228cf57741b27d9e5d66e358456b8e
70
https://github.com/open-mmlab/mmdetection.git
88
def encode(self, bboxes, gt_bboxes): bboxes =
9
110
encode
6
0
1
2
networkx/algorithms/tests/test_lowest_common_ancestors.py
177,407
Renamed test functions in test_lowest_common_ancestors (#6110) * Renamed test functions in test_lowest_common_ancestors * Updated test method names. * Removed redundant docstrings * Minor touchups. Co-authored-by: Ross Barnowski <rossbar@berkeley.edu>
networkx
11
Python
6
test_lowest_common_ancestors.py
def test_tree_all_pairs_lca_default_root(self):
    assert dict(tree_all_pairs_lca(self.DG)) == self.ans
3724ba4ebee5b1cec2e36faab30777bfdc16a6fd
19
https://github.com/networkx/networkx.git
12
def test_tree_all_pairs_lca_default_root(self): assert dict(tree_all_pairs_lca(sel
6
31
test_tree_all_pairs_lca_default_root
12
0
3
18
homeassistant/scripts/benchmark/__init__.py
300,864
Clean up accessing event helpers via hass (#72011)
core
8
Python
10
__init__.py
async def state_changed_helper(hass):
    count = 0
    entity_id = "light.kitchen"
    event = asyncio.Event()
8f4caf414124f380a8f5e1d54aedb54a8f6c5c05
114
https://github.com/home-assistant/core.git
24
async def state_changed_helper(hass): count = 0 entity_id = "light.kitchen" event = asyncio.Event()
7
38
state_changed_helper
58
0
3
10
lib/matplotlib/backend_bases.py
107,498
DOC: More cleanup axes -> Axes
matplotlib
10
Python
49
backend_bases.py
def _update_view(self):
    nav_info = self._nav_stack()
    if nav_info is None:
        return
    # Retrieve all items at once to avoid any risk of GC deleting an Axes
    # while in the middle of the loop below.
    items = list(nav_info.items())
    for ax, (view, (pos_orig, pos_active)) in items:
        ax._set_view(view)
        # Restore both the original and modified positions
        ax._set_position(pos_orig, 'original')
        ax._set_position(pos_active, 'active')
    self.canvas.draw_idle()
f156db08eee54d285ab0fb4e031e48d078ba6aa3
73
https://github.com/matplotlib/matplotlib.git
169
def _update_view(self): nav_info = self._nav_stack() if nav_info is None: return # Retrieve all items at once to avoid any risk of GC deleting an Axes # while in the middle of the loop below. items = list(nav_info.items())
14
125
_update_view
32
0
2
12
src/sentry/api/serializers/models/sentry_function.py
94,074
Sentry Functions: Webhooks Migrations (#37313) * feat(integrations): new field for sentry_functions table * fix(integrations): add working integration, no default
sentry
9
Python
30
sentry_function.py
def serialize(self, obj, attrs, user):
    events = [event for event in obj.events]
    data = {
        "name": obj.name,
        "slug": obj.slug,
        "author": obj.author,
        "code": obj.code,
        "overview": obj.overview,
        "external_id": obj.external_id,
        "events": events,
    }
    return data
e4f3e0a2e26224c5b8883c03ac81f08e99f1bc5b
68
https://github.com/getsentry/sentry.git
136
def serialize(self, obj, attrs, user): events = [event for event in obj.events] data = { "name": obj.name, "slug": obj.slug, "author": obj.author, "code": obj.code, "overview":
14
108
serialize
8
0
1
3
code/default/lib/noarch/front_base/openssl_wrap.py
219,026
v4.6.0 compactiable with python 2.7.
XX-Net
9
Python
8
openssl_wrap.py
def notbefore(self):
    t = self.x509.get_notBefore()
    return datetime.datetime.strptime(t, "%Y%m%d%H%M%SZ")
0820c040ec2815f40bd0e469e27c2bf4d2cc33bc
25
https://github.com/XX-net/XX-Net.git
21
def notbefore(self):
7
42
notbefore
33
1
2
7
python/ray/data/tests/test_dataset_numpy.py
128,311
[Datasets] Add `partitioning` parameter to `read_` functions (#28413)
ray
12
Python
30
test_dataset_numpy.py
def test_numpy_read_partitioning(ray_start_regular_shared, tmp_path):
    path = os.path.join(tmp_path, "country=us", "data.npy")
    os.mkdir(os.path.dirname(path))
    np.save(path, np.arange(4).reshape([2, 2]))
    ds = ray.data.read_numpy(path, partitioning=Partitioning("hive"))
    assert ds.schema().names == ["data", "country"]
    assert [r["country"] for r in ds.take()] == ["us", "us"]

@pytest.mark.parametrize("from_ref", [False, True])
c3ff77f5a13395631a2af580ea4429ceb5dfea13
@pytest.mark.parametrize("from_ref", [False, True])
108
https://github.com/ray-project/ray.git
49
def test_numpy_read_partitioning(ray_start_regular_shared, tmp_path): path = os.path.join(tmp_path, "country=us", "data.npy") os.mkdir(os.path.dirname(path)) np.save(path, np.arange(4).reshape([
25
202
test_numpy_read_partitioning
8
0
1
19
src/datasets/fingerprint.py
106,062
Clean up Dataset and DatasetDict (#5344) * clean up docstrings * make style * apply review Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
datasets
7
Python
8
fingerprint.py
def is_caching_enabled() -> bool:
    global _CACHING_ENABLED
    return bool(_CACHING_ENABLED)
cd3169f3f35afcf73a36a8276113e1881d92e5e0
14
https://github.com/huggingface/datasets.git
17
def is_caching_enabled() -> bool: global _C
3
26
is_caching_enabled
23
0
1
12
tests/components/subaru/test_init.py
289,678
Code quality update for Subaru sensors (#79482) * Use distance device class for sensors * Change sensor name casing and unique_id * Migrate sensor entity unique_id * Match title-cased unique_id when migrating * Remove unneeded regex to find '_' delimited id suffix * Incorporate PR review comments * Add check to prevent extra odometer entity migration
core
12
Python
21
test_init.py
async def test_invalid_credentials(hass, subaru_config_entry):
    await setup_subaru_config_entry(
        hass,
        subaru_config_entry,
        connect_effect=InvalidCredentials("Invalid Credentials"),
        vehicle_list=[TEST_VIN_2_EV],
        vehicle_data=VEHICLE_DATA[TEST_VIN_2_EV],
        vehicle_status=VEHICLE_STATUS_EV,
    )
    check_entry = hass.config_entries.async_get_entry(subaru_config_entry.entry_id)
    assert check_entry
    assert check_entry.state is ConfigEntryState.SETUP_ERROR
073951177b31f90be7232b03df0fd4db77cb3089
62
https://github.com/home-assistant/core.git
83
async def test_invalid_credentials(hass, subaru_config_entry): await setup_subaru_config_entry( hass, subaru_config_entry, connect_effect=InvalidCredentials("Invalid Credential
19
96
test_invalid_credentials
27
0
4
6
lib/mpl_toolkits/axes_grid1/axes_divider.py
109,157
Improve argument checking
matplotlib
11
Python
25
axes_divider.py
def set_anchor(self, anchor):
    if isinstance(anchor, str):
        _api.check_in_list(mtransforms.Bbox.coefs, anchor=anchor)
    elif not isinstance(anchor, (tuple, list)) or len(anchor) != 2:
        raise TypeError("anchor must be str or 2-tuple")
    self._anchor = anchor
e94dfed864a8bbeb215bab5705a490325ac07819
60
https://github.com/matplotlib/matplotlib.git
77
def set_anchor(self, anchor): if isinstance(anchor, str): _api.check_in_list(mtransforms.Bbox.coefs, anchor=anchor) elif not isinstance(anchor, (tupl
15
97
set_anchor
35
0
1
14
homeassistant/components/landisgyr_heat_meter/config_flow.py
307,469
Landis+Gyr integration: increase timeout and add debug logging (#78025)
core
9
Python
30
config_flow.py
async def validate_and_create_entry(self, dev_path):
    model, device_number = await self.validate_ultraheat(dev_path)
    _LOGGER.debug("Got model %s and device_number %s", model, device_number)
    await self.async_set_unique_id(device_number)
    self._abort_if_unique_id_configured()
    data = {
        CONF_DEVICE: dev_path,
        "model": model,
        "device_number": device_number,
    }
    return self.async_create_entry(
        title=model,
        data=data,
    )
93b7f604d5e94c38964dca47daa2f84c9bc253f0
71
https://github.com/home-assistant/core.git
153
async def validate_and_create_entry(self, dev_path): model, device_number = await self.validate_ultraheat(dev_path) _LOGGER.debug("Got model %s and device_number %s", model, device_number) await self.async_set_unique_id(device_number) self._abort_if_unique_id_configured()
14
117
validate_and_create_entry
61
0
4
9
mkdocs/contrib/search/search_index.py
224,646
Cleanup: replace unnecessary list comprehensions (#2949)
mkdocs
12
Python
46
search_index.py
def handle_starttag(self, tag, attrs):
    # We only care about the opening tag for headings.
    if tag not in _HEADER_TAGS:
        return
    # We are dealing with a new header, create a new section
    # for it and assign the ID if it has one.
    self.is_header_tag = True
    self.section = ContentSection()
    self.data.append(self.section)
    for attr in attrs:
        if attr[0] == "id":
            self.section.id = attr[1]
3035ad18f1706c262bf0efbc2c7fa9832f523584
62
https://github.com/mkdocs/mkdocs.git
161
def handle_starttag(self, tag, attrs): # We only care about the opening tag for headings. if tag not in _HEADER_TAGS: return # We are dealing with a new header, create a new section
12
103
handle_starttag
18
0
1
4
saleor/payment/tests/test_payment.py
29,013
Drop `AnonymouUser` from the context, and assign None instead (#10575) * Fix error when app deleted product added to draft order; Fixes #10574 * Get rid of AnonymousUser from context * Ger rid of AnonymousUser * Drop anonymous_user fixture * Clean events * Fix test_checkout_complete.py file * Drop changelog entry * Update resolver for me query * Apply code review remarks * Apply changes after rebasing with main branch * Fix review remarks * Update create order from checkout tests * Drop remaining uses of is_anonymous Co-authored-by: IKarbowiak <iga.karbowiak@mirumee.com>
saleor
9
Python
14
test_payment.py
def test_payment_owned_by_user_anonymous_user(payment):
    # given
    user = None
    # when
    is_owned = payment_owned_by_user(payment.pk, user)
    # then
    assert not is_owned
b8598fa2cf84f8bb473f2066f075ad7a374c3c80
21
https://github.com/saleor/saleor.git
35
def test_payment_owned_by_user_anonymous_user(payment): # given user = None # when is_owned = payment_owned_by_user(payment.p
6
37
test_payment_owned_by_user_anonymous_user
168
1
1
7
tests/sentry/snuba/test_profiles.py
87,518
feat(profiling): Introduce profile timeseries query builder (#40745) Analogous to #40557, this introduces a new query builder to be able to return timeseries data.
sentry
14
Python
77
test_profiles.py
def test_aggregate_resolution(query_builder_fn, params, field, resolved): builder = query_builder_fn( dataset=Dataset.Profiles, params=params, selected_columns=[field], ) assert builder.columns == [resolved] @pytest.mark.parametrize( "field,message", [ pytest.param("foo", "Unknown field: foo", id="foo"), pytest.param("count(id)", "count: expected 0 argument\\(s\\)", id="count(id)"), pytest.param( "count_unique(foo)", "count_unique: column argument invalid: foo is not a valid column", id="count_unique(foo)", ), *[ pytest.param( f"p{qt}(foo)", f"p{qt}: column argument invalid: foo is not a valid column", id=f"p{qt}(foo)", ) for qt in ["50", "75", "95", "99"] ], *[ pytest.param( f"p{qt}(id)", f"p{qt}: column argument invalid: id is not a numeric column", id=f"p{qt}(id)", ) for qt in ["50", "75", "95", "99"] ], pytest.param( "percentile(foo,0.25)", "percentile: column argument invalid: foo is not a valid column", id="percentile(foo,0.25)", ), pytest.param( "percentile(id,0.25)", "percentile: column argument invalid: id is not a numeric column", id="percentile(id,0.25)", ), *[ pytest.param( f"{fn}(foo)", f"{fn}: column argument invalid: foo is not a valid column", id=f"{fn}(foo)", ) for fn in ["min", "max", "avg", "sum"] ], *[ pytest.param( f"{fn}(id)", f"{fn}: column argument invalid: id is not a numeric column", id=f"{fn}(id)", ) for fn in ["min", "max", "avg", "sum"] ], ], ) @query_builder_fns() @pytest.mark.django_db
1dab08bfd4006ccdccfeda9623cc5d60c6adb63c
@pytest.mark.parametrize( "field,message", [ pytest.param("foo", "Unknown field: foo", id="foo"), pytest.param("count(id)", "count: expected 0 argument\\(s\\)", id="count(id)"), pytest.param( "count_unique(foo)", "count_unique: column argument invalid: foo is not a valid column", id="count_unique(foo)", ), *[ pytest.param( f"p{qt}(foo)", f"p{qt}: column argument invalid: foo is not a valid column", id=f"p{qt}(foo)", ) for qt in ["50", "75", "95", "99"] ], *[ pytest.param( f"p{qt}(id)", f"p{qt}: column argument invalid: id is not a numeric column", id=f"p{qt}(id)", ) for qt in ["50", "75", "95", "99"] ], pytest.param( "percentile(foo,0.25)", "percentile: column argument invalid: foo is not a valid column", id="percentile(foo,0.25)", ), pytest.param( "percentile(id,0.25)", "percentile: column argument invalid: id is not a numeric column", id="percentile(id,0.25)", ), *[ pytest.param( f"{fn}(foo)", f"{fn}: column argument invalid: foo is not a valid column", id=f"{fn}(foo)", ) for fn in ["min", "max", "avg", "sum"] ], *[ pytest.param( f"{fn}(id)", f"{fn}: column argument invalid: id is not a numeric column", id=f"{fn}(id)", ) for fn in ["min", "max", "avg", "sum"] ], ], ) @query_builder_fns() @pytest.mark.django_db
40
https://github.com/getsentry/sentry.git
725
def test_aggregate_resolution(query_builder_fn, params, field, resolved): builder = query_builder_fn( dataset=Dataset.Profiles, params=params, selected_columns=[field], ) assert builder.columns == [resolved] @pytest.mark.parametrize( "field,message", [ pytest.param("foo", "Unknown field: foo", id="foo"), pytest.param("count(id)", "count: expected 0 argument\\(s\\)", id="count(id)"), pytest.param( "count_unique(foo)", "count_unique: column argument invalid: foo is not a valid column", id="count_unique(foo)", ), *[ pytest.param( f"p{qt}(foo)", f"p{qt}: column argument invalid: foo is not a valid column", id=f"p{qt}(foo)", ) for qt in ["50", "75", "95", "99"] ], *[ pytest.param( f"p{qt}(id)", f"p{qt}: column argument invalid: id is not a numeric column",
20
467
test_aggregate_resolution
35
0
1
15
kivy/tests/test_logger.py
194,652
Refactored logging.ColoredFormatter to avoid deepcopy. (#7962) * Refactor ColoredFormatter Removed old formatter and support code. Added 3 LogRecord shims, new formatter, new unit tests for above and a unit test that used to fail to confirm bugs have been fixed. * PEP8 fixes Match project-style. * PEP8 fixes (I can't run `black` on these files without making the review too hard.) * PEP8 Fixes Note to self: Do a `black` refactor of key files so I don't get stuck in this loop again.
kivy
11
Python
34
test_logger.py
def test_colonsplittinglogrecord_without_colon():
    from kivy.logger import ColonSplittingLogRecord
    originallogrecord = logging.LogRecord(
        name="kivy.test",
        level=logging.DEBUG,
        pathname="test.py",
        lineno=1,
        msg="Part1 Part2 Part 3",
        args=("args",),
        exc_info=None,
        func="test_colon_splitting",
        sinfo=None,
    )
    shimmedlogrecord = ColonSplittingLogRecord(originallogrecord)
    # No colons means no change.
    assert str(originallogrecord) == str(shimmedlogrecord)
c5ff6db790f738a0e2d5f1dc91c5d883791357d3
74
https://github.com/kivy/kivy.git
115
def test_colonsplittinglogrecord_without_colon(): from kivy.logger import ColonSplittingLogRecord originallogrecord = logging.LogRecord( name="kivy.test", level=logging.DEBUG, pathname="test.py", lineno=1, msg="Part1 Part2 Part 3", args=("args",), exc_info=None, func="test_colon_splitting", sinfo=None, ) shimmedlogrecord = ColonSplittingLogRecord(originallogrecord) # No colons means no change. assert str(originallogrecord) == str(shimmedlogrecord)
19
118
test_colonsplittinglogrecord_without_colon
14
0
1
8
wagtail/documents/views/multiple.py
74,911
Reformat with black
wagtail
13
Python
13
multiple.py
def get_context_data(self, **kwargs):
    context = super().get_context_data(**kwargs)
    context.update(
        {
            "max_title_length": self.form.fields["title"].max_length,
        }
    )
    return context
d10f15e55806c6944827d801cd9c2d53f5da4186
41
https://github.com/wagtail/wagtail.git
78
def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update( { "max_title_length": self.form.fields["ti
9
69
get_context_data
10
0
1
3
tests/providers/amazon/aws/hooks/test_lambda_function.py
45,514
Feature: Add invoke lambda function operator (#21686)
airflow
10
Python
10
test_lambda_function.py
def test_get_conn_returns_a_boto3_connection(self):
    hook = LambdaHook(aws_conn_id='aws_default')
    assert hook.conn is not None
33edeb2cb1c83c61f2ce5981066228d10a77df5b
20
https://github.com/apache/airflow.git
23
def test_get_conn_returns_a_boto3_connection(self): hook = LambdaHook(aws_conn_id='
6
34
test_get_conn_returns_a_boto3_connection
21
0
1
5
saleor/webhook/observability/tests/test_buffer.py
27,561
Observability reporter (#9803) * Initial commit * Add observability celery beat task * Add observability_reporter_task and observability_send_events * Convert payload to camel case * Add fakeredis to dev dependencies * Add redis buffer tests * Refactor buffer * Update * Optimize buffer * Add tests * Add types-redis to dev dependencies * Refactor * Fix after rebase * Refactor opentracing * Add opentracing to observability tasks * Add more tests * Fix buffer fixtures * Report dropped events * Fix buffer tests * Refactor get_buffer * Refactor unit tests * Set Redis connection client_name * Refactor redis tests * Fix test_get_or_create_connection_pool * Fix JsonTruncText comparison * Add more generate_event_delivery_attempt_payload tests
saleor
12
Python
14
test_buffer.py
def test_put_events_max_size(buffer):
    events = [{"event": "data"}] * MAX_SIZE * 2
    dropped = buffer.put_events(events)
    assert buffer.size() == MAX_SIZE
    assert dropped == MAX_SIZE
7ea7916c65357741c3911e307acb58d547a5e91a
38
https://github.com/saleor/saleor.git
32
def test_put_events_max_size(buffer): events = [{"event": "data"}] * MAX_SIZE * 2 dropped = buffer.put_events(even
7
65
test_put_events_max_size
59
0
1
20
test/test_outputs.py
179,834
removed outdated outputs tests
gradio
12
Python
44
test_outputs.py
def test_as_component(self):
    ht_output = gr.outputs.HighlightedText(color_map={"pos": "green", "neg": "red"})
    self.assertEqual(
        ht_output.get_template_context(),
        {
            "color_map": {"pos": "green", "neg": "red"},
            "name": "highlightedtext",
            "label": None,
            "show_legend": False,
            "css": {}
        },
    )
    ht = {"pos": "Hello ", "neg": "World"}
    with tempfile.TemporaryDirectory() as tmpdirname:
        to_save = ht_output.save_flagged(tmpdirname, "ht_output", ht, None)
        self.assertEqual(to_save, '{"pos": "Hello ", "neg": "World"}')
        self.assertEqual(
            ht_output.restore_flagged(tmpdirname, to_save, None),
            {"pos": "Hello ", "neg": "World"},
        )
6b259bde9572930d4c699fe5b75fc3b6b7c62234
135
https://github.com/gradio-app/gradio.git
275
def test_as_component(self): ht_output = gr.outputs.HighlightedText(color_map={"pos": "green", "neg": "red"}) self.assertEqual( ht_output.get_template_context(), { "color_map": {"pos": "green", "neg": "red"}, "name": "highlightedtext", "label": None, "show_legend": False, "css": {} }, ) ht = {"pos": "Hello ", "neg": "World"} with tempfile.TemporaryDirectory() as tmpdirname: to_save = ht_output.save_flagged(tmpdirname, "ht_output", ht, None) self.assertEqual(to_save, '{"pos": "Hello ", "neg": "World"}') self.assertEqual( ht_output.restore_flagged(tmpdirname, to_save, None), {"pos": "Hello ", "neg": "World"}, )
16
247
test_as_component
25
0
4
9
jina/serve/networking.py
13,712
refactor: add more debug info to prints (#5475) Signed-off-by: Johannes Messner <messnerjo@gmail.com>
jina
10
Python
22
networking.py
def host_is_local(hostname):
    import socket
    fqn = socket.getfqdn(hostname)
    if fqn in ('localhost', '0.0.0.0') or hostname == '0.0.0.0':
        return True
    try:
        return ipaddress.ip_address(hostname).is_loopback
    except ValueError:
        return False
8794fcd378b1f6fadc3f84a6492441ca0168483c
47
https://github.com/jina-ai/jina.git
64
def host_is_local(hostname):
9
84
host_is_local
64
0
6
13
datasets/red_caps/red_caps.py
104,061
Add RedCaps dataset (#3424) * Add RedCaps script * Improve script * Add underscore to global variables * Add README file * Add info for main config * Small improvements * Add dummy data * Minor fix in README * Specify timezone in features dict * Specify dataset name in README :) * Add instructions on how to download data * Specify user-agent
datasets
18
Python
38
red_caps.py
def _config_name_to_description(config_name):
    if config_name == "all":
        return "Contains data from all the subreddits"
    else:
        if re.match(r".*_\d{4}$", config_name):
            subreddit, year = config_name.split("_")
            year_str = "2008 - 2017" if year == "2017" else year
        else:
            subreddit = config_name
            year_str = ", ".join(
                ["2008 - 2017" if year == "2017" else year for year in _SUBREDDIT_TO_YEAR[config_name]]
            )
        return f"Contains data from the {subreddit} subreddit posted in {year_str}"
5c9ad28ed03716e02eb1b95bd6094914cdd27df8
75
https://github.com/huggingface/datasets.git
167
def _config_name_to_description(config_name): if config_name == "all": return "Contains data from all the subreddits" else: if re.match(r".*_\d{4}$", confi
10
141
_config_name_to_description
9
0
2
4
python3.10.4/Lib/asyncio/base_events.py
220,325
add python 3.10.4 for windows
XX-Net
11
Python
9
base_events.py
def _asyncgen_finalizer_hook(self, agen):
    self._asyncgens.discard(agen)
    if not self.is_closed():
        self.call_soon_threadsafe(self.create_task, agen.aclose())
8198943edd73a363c266633e1aa5b2a9e9c9f526
37
https://github.com/XX-net/XX-Net.git
33
def _asyncgen_finalizer_hook(self, agen): self._asyncgens.
9
60
_asyncgen_finalizer_hook
8
0
1
7
bootloader/waflib/Tools/irixcc.py
263,522
Bootloader: Building: Unpack waf's lib archive. Doing so makes it easier to modify. This is a temporary measure until the next waf version is released (although I'm tempted to keep it since it's much more IDE completion friendly).
pyinstaller
7
Python
8
irixcc.py
def configure(conf):
    conf.find_irixcc()
    conf.find_ar()
    conf.irixcc_common_flags()
    conf.cc_load_tools()
    conf.cc_add_flags()
    conf.link_add_flags()
64ccb7aea824fbec57f7ed1bbe483ec486183c13
35
https://github.com/pyinstaller/pyinstaller.git
25
def configure(conf): conf.find_irixcc() conf.find_ar() conf.irixcc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags()
8
63
configure
34
0
4
14
homeassistant/components/todoist/calendar.py
298,274
Update todoist integration to use new official rest api library (#79481) * Swapping out libraries. * Adding types * Add ability to add task. * Removed remaining todos. * Fix lint errors. * Fixing tests. * Update to v2 of the rest api. * Swapping out libraries. * Adding types * Add ability to add task. * Removed remaining todos. * Fix lint errors. * Fix mypy errors * Fix custom projects. * Bump DEPENDENCY_CONFLICTS const * Remove conflict bump * Addressing PR feedback. * Removing utc offset logic and configuration. * Addressing PR feedback. * Revert date range logic check
core
14
Python
27
calendar.py
def calendar_event(self) -> CalendarEvent | None:
    if not self.event:
        return None
    start = self.event[START]
    if self.event.get(ALL_DAY) or self.event[END] is None:
        return CalendarEvent(
            summary=self.event[SUMMARY],
            start=start.date(),
            end=start.date() + timedelta(days=1),
        )
    return CalendarEvent(
        summary=self.event[SUMMARY], start=start, end=self.event[END]
    )
8cbbdf21f3d2ecaedb95d44b667a60302c137fbf
106
https://github.com/home-assistant/core.git
165
def calendar_event(self) -> CalendarEvent | None: if not self.event: return None start = self.event[START] if self.event.get(ALL_DAY) or self.event[END] is None: return CalendarEvent( summary=self.event[SUMMARY],
15
162
calendar_event
17
0
1
6
wagtail/api/v2/tests/test_pages.py
72,885
Reformat with black
wagtail
11
Python
16
test_pages.py
def test_empty_searches_work(self):
    response = self.get_response(search="")
    content = json.loads(response.content.decode("UTF-8"))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response["Content-type"], "application/json")
    self.assertEqual(content["meta"]["total_count"], 0)

# REGRESSION TESTS
d10f15e55806c6944827d801cd9c2d53f5da4186
65
https://github.com/wagtail/wagtail.git
54
def test_empty_searches_work(self): response = self.get_response(search="") content = json.loads(response.content.decode("UTF-8")) self.assertEqual(response.status_code, 200) self.assertEqual(response["Content-type"], "application/json") self.assertEqual(content["meta"]["total_count"], 0)
11
113
test_empty_searches_work
108
1
1
34
tests/components/generic/test_camera.py
313,553
Use create_stream in generic camera config flow (#73237) * Use create_stream in generic camera config flow
core
13
Python
84
test_camera.py
async def test_stream_source_error(hass, hass_client, hass_ws_client, fakeimgbytes_png): respx.get("http://example.com").respond(stream=fakeimgbytes_png) assert await async_setup_component( hass, "camera", { "camera": { "name": "config_test", "platform": "generic", "still_image_url": "http://example.com", # Does not exist "stream_source": 'http://example.com/{{ states.sensor.temp.state + "a" }}', "limit_refetch_to_url_change": True, }, }, ) assert await async_setup_component(hass, "stream", {}) await hass.async_block_till_done() with patch( "homeassistant.components.camera.Stream.endpoint_url", return_value="http://home.assistant/playlist.m3u8", ) as mock_stream_url: # Request playlist through WebSocket client = await hass_ws_client(hass) await client.send_json( {"id": 1, "type": "camera/stream", "entity_id": "camera.config_test"} ) msg = await client.receive_json() # Assert WebSocket response assert mock_stream_url.call_count == 0 assert msg["id"] == 1 assert msg["type"] == TYPE_RESULT assert msg["success"] is False assert msg["error"] == { "code": "start_stream_failed", "message": "camera.config_test does not support play stream service", } @respx.mock
b1f2e5f897540967ebef2ccf98026d70009b5c4f
@respx.mock
169
https://github.com/home-assistant/core.git
402
async def test_stream_source_error(hass, hass_client, hass_ws_client, fakeimgbytes_png): respx.get("http://example.com").respond(stream=fakeimgbytes_png) assert await async_setup_component( hass, "camera", { "camera": { "name": "config_test", "platform": "generic", "still_ima
21
320
test_stream_source_error
237
0
15
56
homeassistant/components/frontier_silicon/media_player.py
301,798
Address late comments for frontier silicon (#72745) Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
core
15
Python
145
media_player.py
async def async_update(self): afsapi = self.fs_device try: if await afsapi.get_power(): status = await afsapi.get_play_status() self._state = { PlayState.PLAYING: STATE_PLAYING, PlayState.PAUSED: STATE_PAUSED, PlayState.STOPPED: STATE_IDLE, PlayState.LOADING: STATE_OPENING, None: STATE_IDLE, }.get(status) else: self._state = STATE_OFF except FSConnectionError: if self._attr_available: _LOGGER.warning( "Could not connect to %s. Did it go offline?", self._name or afsapi.webfsapi_endpoint, ) self._attr_available = False return if not self._attr_available: _LOGGER.info( "Reconnected to %s", self._name or afsapi.webfsapi_endpoint, ) self._attr_available = True if not self._name: self._name = await afsapi.get_friendly_name() if not self._source_list: self.__modes_by_label = { mode.label: mode.key for mode in await afsapi.get_modes() } self._source_list = list(self.__modes_by_label) # The API seems to include 'zero' in the number of steps (e.g. if the range is # 0-40 then get_volume_steps returns 41) subtract one to get the max volume. # If call to get_volume fails set to 0 and try again next time. if not self._max_volume: self._max_volume = int(await afsapi.get_volume_steps() or 1) - 1 if self._state != STATE_OFF: info_name = await afsapi.get_play_name() info_text = await afsapi.get_play_text() self._title = " - ".join(filter(None, [info_name, info_text])) self._artist = await afsapi.get_play_artist() self._album_name = await afsapi.get_play_album() self._source = (await afsapi.get_mode()).label self._mute = await afsapi.get_mute() self._media_image_url = await afsapi.get_play_graphic() volume = await self.fs_device.get_volume() # Prevent division by zero if max_volume not known yet self._volume_level = float(volume or 0) / (self._max_volume or 1) else: self._title = None self._artist = None self._album_name = None self._source = None self._mute = None self._media_image_url = None self._volume_level = None # Management actions # power control
db9c586404d8fb0e520e731ccb0229d08ffd7161
368
https://github.com/home-assistant/core.git
951
async def async_update(self): afsapi = self.fs_device try: if await afsapi.get_power(): status = await afsapi.get_play_status() self._state = { PlayState.PLAYING: STATE_PLAYING, PlayState.PAUSED: STATE_PAUSED, PlayState.STOPPED: STATE_IDLE, PlayState.LOADING: STATE_OPENING, None: STATE_IDLE, }.get(status) else: self._state = STATE_OFF except FSConnectionError: if self._attr_available: _LOGGER.warning( "Could not connect to %s. Did it go offline?", self._name or afsapi.webfsapi_endpoint, ) self._attr_available = False return if not self._attr_available: _LOGGER.info( "Reconnected to %s", self._name or afsapi.webfsapi_
58
606
async_update
11
0
1
3
dask/diagnostics/profile.py
156,122
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
dask
8
Python
11
profile.py
def _plot(self, **kwargs):
    from dask.diagnostics.profile_visualize import plot_tasks
    return plot_tasks(self.results, self._dsk, **kwargs)
cccb9d8d8e33a891396b1275c2448c352ef40c27
30
https://github.com/dask/dask.git
24
def _plot(self, **kwargs): from dask.diagnostics.profile_visualize import plot_tasks return plot_t
9
44
_plot
81
0
1
37
keras/callbacks_test.py
279,805
add unit test for start_from_epoch to EarlyStop
keras
12
Python
45
callbacks_test.py
def test_EarlyStopping_with_start_from_epoch(self): with self.cached_session(): np.random.seed(1337) (data, labels), _ = test_utils.get_test_data( train_samples=100, test_samples=50, input_shape=(1,), num_classes=NUM_CLASSES, ) model = test_utils.get_small_sequential_mlp( num_hidden=1, num_classes=1, input_dim=1 ) model.compile( optimizer="sgd", loss="binary_crossentropy", metrics=["acc"] ) start_from_epoch = 2 patience = 3 stopper = keras.callbacks.EarlyStopping( monitor="acc", patience=patience, start_from_epoch=start_from_epoch, ) hist = model.fit( data, labels, callbacks=[stopper], verbose=0, epochs=20 ) assert len(hist.epoch) >= patience + start_from_epoch start_from_epoch = 2 patience = 0 stopper = keras.callbacks.EarlyStopping( monitor="acc", patience=patience, start_from_epoch=start_from_epoch, ) hist = model.fit( data, labels, callbacks=[stopper], verbose=0, epochs=20 ) assert len(hist.epoch) >= start_from_epoch
c492e45a017ecff5196a45d962d1618cac89467a
210
https://github.com/keras-team/keras.git
528
def test_EarlyStopping_with_start_from_epoch(self): with self.cached_session(): np.random.seed(1337) (data, labels), _ = test_utils.get_test_data( train_samples=100, test_samples=50, input_shape=(1,), num_classes=NUM_CLASSES, ) model = test_utils.get_small_sequential_mlp( num_hidden=1, num_classes=1, input_dim=1 ) model.compile( optimizer="sgd", loss="binary_crossentropy", metrics=["acc"] ) start_from_epoch = 2 patience = 3 stopper = keras.callbacks.EarlyStopping( monitor="acc", patience=patience, start_from_epoch=start_from_epoch, ) hist = model.fit( data, labels, callbacks=[stopper], verbose=0, epochs=20 ) assert len(hist.epoch) >= patience + start_from_epoch start_from_epoch = 2 patience = 0 stopper = ke
37
319
test_EarlyStopping_with_start_from_epoch
20
0
3
9
src/prefect/orion/models/block_schemas.py
55,969
Nested Block Schemas (PrefectHQ/orion#1846) * Adds models and migration for block schema and block document references * Adds customization to the generation of a block schema's fields * Adds ability to reconstruct block schema fields on read * Adds ability to reconstruct block schema when read by checksum * Adds schema reconstruction when reading multiple block schemas * Adds ordering to query of recursive CTE * Refactors to make code path and purpose easier to follow
prefect
11
Python
20
block_schemas.py
def _find_block_schema_via_checksum(block_schemas_with_references, checksum):
    return next(
        (
            block_schema
            for block_schema, _, _ in block_schemas_with_references
            if block_schema.checksum == checksum
        ),
        None,
    )
a05e44c89acf0b6073ac876479be24a5e51d7754
32
https://github.com/PrefectHQ/prefect.git
83
def _find_block_schema_via_checksum(block_schemas_with_references, checksum): return next( ( block_schema for block_schema, _, _ in block_schemas_with_references if block_schema.checksum == checksum ), None, )
6
48
_find_block_schema_via_checksum
77
0
3
31
awx/main/tests/unit/test_tasks.py
80,579
Decoupled callback functions from BaseTask Class --- Removed all callback functions from 'jobs.py' and put them in a new file '/awx/main/tasks/callback.py' --- Modified Unit tests unit moved --- Moved 'update_model' from jobs.py to /awx/main/utils/update_model.py
awx
11
Python
59
test_tasks.py
def test_vars_unsafe_by_default(self, job, private_data_dir): job.created_by = User(pk=123, username='angry-spud') job.inventory = Inventory(pk=123, name='example-inv') task = jobs.RunJob() task.build_extra_vars_file(job, private_data_dir) fd = open(os.path.join(private_data_dir, 'env', 'extravars')) extra_vars = yaml.load(fd, Loader=SafeLoader) # ensure that strings are marked as unsafe for unsafe in [ 'awx_job_template_name', 'tower_job_template_name', 'awx_user_name', 'tower_job_launch_type', 'awx_project_revision', 'tower_project_revision', 'tower_user_name', 'awx_job_launch_type', 'awx_inventory_name', 'tower_inventory_name', ]: assert hasattr(extra_vars[unsafe], '__UNSAFE__') # ensure that non-strings are marked as safe for safe in [ 'awx_job_template_id', 'awx_job_id', 'awx_user_id', 'tower_user_id', 'tower_job_template_id', 'tower_job_id', 'awx_inventory_id', 'tower_inventory_id', ]: assert not hasattr(extra_vars[safe], '__UNSAFE__')
443bdc1234682dd0004bae372078512fcf37cce9
150
https://github.com/ansible/awx.git
380
def test_vars_unsafe_by_default(self, job, private_data_dir): job.created_by = User(pk=123, username='angry-spud') job.inventory = Inventory(pk=123, name='example-inv') task = jobs.RunJob() task.build_extra_vars_file(job, private_data_dir) fd = open(os.path.join(private_data_dir, 'env', 'extravars')) extra_vars = yaml.load(fd, Loader=SafeLoader) # ensure that strings are marked as unsafe for unsafe in [ 'awx_job_template_name', 'tower_job_template_name', 'awx_user_name', 'tower_job_launch_type', 'awx_project_revision', 'tower_project_revision', 'tower_user_name', 'awx_job_launch_type', 'awx_inventory_name',
28
256
test_vars_unsafe_by_default
26
0
2
7
nuitka/utils/CStrings.py
178,838
Python3.7+: Added support for get_resource_reader to our loader * This allows to avoid a useless file copy to a temporary file in case a "importlib.resources.path" is used. * Also fixed a few typos in tests. * And avoid compiling the meta path based loader separately, so it can use compiled code helpers easily.
Nuitka
10
Python
20
CStrings.py
def encodePythonUnicodeToC(value):
    assert type(value) is unicode, type(value)
    result = ""
    for c in value:
        cv = ord(c)
        result += r"\%o" % cv
    return 'L"%s"' % result
70b7eee9555c8d5599d096eaf600521475b001d9
42
https://github.com/Nuitka/Nuitka.git
55
def encodePythonUnicodeToC(value): assert type(value) is unicode, type(value) result = "" for c in
8
73
encodePythonUnicodeToC
46
0
3
9
test/distributed/_sharded_tensor/ops/test_binary_cmp.py
102,422
Implement torch.allclose for sharded tensor. (#70331) Summary: Implement torch.allclose op for sharded tensors. Pull Request resolved: https://github.com/pytorch/pytorch/pull/70331 Test Plan: Automated test added. pritamdamania87 Fixes https://github.com/pytorch/pytorch/issues/67112 cc pietern mrshenli pritamdamania87 zhaojuanmao satgera rohan-varma gqchen aazzolini osalpekar jiayisuse SciPioneer H-Huang Reviewed By: pritamdamania87 Differential Revision: D33339137 Pulled By: kumpera fbshipit-source-id: 4263e468eaa117317b190f69877bf3f8bbac5658
pytorch
9
Python
32
test_binary_cmp.py
def get_random_tensors(self, spec1, spec2, *sizes, pg1=None, pg2=None, seed_offset=0):
    pg1 = _get_default_group() if pg1 is None else pg1
    pg2 = _get_default_group() if pg2 is None else pg2
    torch.manual_seed(TestShardedTensorBinaryOps.seed)
    st1 = _sharded_tensor.rand(spec1, sizes, process_group=pg1)
    torch.manual_seed(TestShardedTensorBinaryOps.seed + seed_offset)
    st2 = _sharded_tensor.rand(spec2, sizes, process_group=pg2)
    TestShardedTensorBinaryOps.seed += 1
    return st1, st2
2378421340e5aec0033d564be7b706e8f903b146
101
https://github.com/pytorch/pytorch.git
101
def get_random_tensors(self, spec1, spec2, *sizes, pg1=None, pg2=None, seed_offset=0): pg1 = _get_default_
18
149
get_random_tensors
11
1
1
2
tests/timetables/test_events_timetable.py
46,956
Events Timetable (#22332) This Timetable will be widely useful for timing based on sporting events, planned communication campaigns, and other schedules that are arbitrary and irregular but predictable.
airflow
10
Python
11
test_events_timetable.py
def restricted_timetable():
    return EventsTimetable(event_dates=EVENT_DATES, restrict_to_events=True)

@pytest.mark.parametrize(
    "start, end",
    list(zip(EVENT_DATES, EVENT_DATES)),
)
582e0d53af78f881cc0f9e5b063bef11f18f7999
@pytest.mark.parametrize( "start, end", list(zip(EVENT_DATES, EVENT_DATES)), )
15
https://github.com/apache/airflow.git
17
def restricted_timetable(): return EventsTimet
10
56
restricted_timetable
22
0
1
6
.venv/lib/python3.8/site-packages/pip/_internal/vcs/subversion.py
61,385
upd; format
transferlearning
10
Python
20
subversion.py
def switch(self, dest, url, rev_options):
    # type: (str, HiddenText, RevOptions) -> None
    cmd_args = make_command(
        'switch', self.get_remote_call_options(), rev_options.to_args(),
        url, dest,
    )
    self.run_command(cmd_args)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
40
https://github.com/jindongwang/transferlearning.git
71
def switch(self, dest, url, rev_options): # type: (str, HiddenText, RevOptions) -> None cmd_args = make_command( 'switch', self.get_remote_call_options(), rev_options.to_args(), url, dest, ) self.run_command(cmd_args)
10
61
switch
128
0
1
29
zerver/tests/test_events.py
83,726
actions: Add function to add and remove subgroups from a user group.
zulip
13
Python
67
test_events.py
def test_user_group_events(self) -> None: othello = self.example_user("othello") events = self.verify_action( lambda: check_add_user_group( self.user_profile.realm, "backend", [othello], "Backend team" ) ) check_user_group_add("events[0]", events[0]) # Test name update backend = UserGroup.objects.get(name="backend") events = self.verify_action(lambda: do_update_user_group_name(backend, "backendteam")) check_user_group_update("events[0]", events[0], "name") # Test description update description = "Backend team to deal with backend code." events = self.verify_action(lambda: do_update_user_group_description(backend, description)) check_user_group_update("events[0]", events[0], "description") # Test add members hamlet = self.example_user("hamlet") events = self.verify_action(lambda: bulk_add_members_to_user_group(backend, [hamlet.id])) check_user_group_add_members("events[0]", events[0]) # Test remove members hamlet = self.example_user("hamlet") events = self.verify_action(lambda: remove_members_from_user_group(backend, [hamlet.id])) check_user_group_remove_members("events[0]", events[0]) api_design = create_user_group( "api-design", [hamlet], hamlet.realm, description="API design team" ) # Test add subgroups events = self.verify_action(lambda: add_subgroups_to_user_group(backend, [api_design])) check_user_group_add_subgroups("events[0]", events[0]) # Test remove subgroups events = self.verify_action(lambda: remove_subgroups_from_user_group(backend, [api_design])) check_user_group_remove_subgroups("events[0]", events[0]) # Test remove event events = self.verify_action(lambda: check_delete_user_group(backend.id, othello)) check_user_group_remove("events[0]", events[0])
b4a9311ef296da9d50f176b775d8452f99d12c55
284
https://github.com/zulip/zulip.git
392
def test_user_group_events(self) -> None: othello = self.example_user("othello") events = self.verify_action( lambda: check_add_user_group( self.user_profile.realm, "backend", [othello], "Backend team" ) ) check_user_group_add("events[0]", events[0]) # Test name update backend = UserGroup.objects.get(name="backend")
33
473
test_user_group_events
13
0
1
3
tests/orion/models/test_work_queues.py
53,828
Add work queue models and schemas
prefect
10
Python
13
test_work_queues.py
async def test_read_work_queue(self, work_queues, session):
    read_work_queue = await models.work_queues.read_work_queues(session=session)
    assert len(read_work_queue) == len(work_queues)
bd98b7589b4da7405da6f93fd0df5b452ef02b4b
32
https://github.com/PrefectHQ/prefect.git
26
async def test_read_work_queue(self, work_queues, session): read_wor
8
51
test_read_work_queue
62
0
5
15
tests/_core/test_plot.py
42,103
Allow Plot.label to control title(s) (#2934) * Don't show facet variable names in facet titles * Don't document Plot.label as accepting None as a value * Allow Plot.label to control titles, including when faceting * Don't include separator in labeled facet title * Clean up title typing * Fix legend test * Fix legend contents typing after rebase * Add theme update to Plot.clone and remove outdated todo
seaborn
13
Python
48
test_plot.py
def check_facet_results_2d(self, p, df, variables, order=None):
    p = p.plot()
    if order is None:
        order = {dim: categorical_order(df[key]) for dim, key in variables.items()}
    levels = itertools.product(*[order[dim] for dim in ["row", "col"]])
    assert len(p._subplots) == len(list(levels))
    for subplot, (row_level, col_level) in zip(p._subplots, levels):
        assert subplot["row"] == row_level
        assert subplot["col"] == col_level
        assert subplot["axes"].get_title() == (
            f"{col_level} | {row_level}"
        )
        assert_gridspec_shape(
            subplot["axes"], len(levels["row"]), len(levels["col"])
        )
a259ac55c4233ab3418459d3b6cd195ebe2cb521
156
https://github.com/mwaskom/seaborn.git
203
def check_facet_results_2d(self, p, df, variables, order=None): p = p.plot() if order is None: order = {dim: categorical_order(df[key]) for dim, key in variables.items()} levels = itertools.product(*[order[dim] for dim in ["row", "col
23
255
check_facet_results_2d
32
0
1
13
test/mitmproxy/addons/test_save.py
250,763
Rotate stream files (#5097) * Example addon for saving streamed data including a small bug fix to make it work. * Revert "Example addon for saving streamed data including a small bug fix to make it work." This reverts commit 02ab78def9a52eaca1a89d0757cd9475ce250eaa. * Add support for rotating stream files every hour or day * Added tests * Modified to change the stream file every time the formating string changes as time moves on. * Update to more compact version * simplify save addon logic * make mypy happy * fix compatibility with Python 3.8 Co-authored-by: Maximilian Hils <git@maximilianhils.com>
mitmproxy
12
Python
25
test_save.py
def test_tcp(tmp_path):
    sa = save.Save()
    with taddons.context(sa) as tctx:
        p = str(tmp_path / "foo")
        tctx.configure(sa, save_stream_file=p)
        tt = tflow.ttcpflow()
        sa.tcp_start(tt)
        sa.tcp_end(tt)
        tt = tflow.ttcpflow()
        sa.tcp_start(tt)
        sa.tcp_error(tt)
        tctx.configure(sa, save_stream_file=None)
        assert len(rd(p)) == 2
3a5550a09cd40d76acfe71aa45c7a8309525ad51
98
https://github.com/mitmproxy/mitmproxy.git
107
def test_tcp(tmp_path): sa = save.Save() with taddons.context(sa) as tctx: p = str(tmp_path / "foo") tctx.configure(sa, save_stream_file=p) tt = tflow.ttcpflow() sa.tcp_start(tt) sa.tcp_end(tt) tt = tflow.ttcpflow() sa.tcp_sta
20
165
test_tcp
34
0
1
12
tests/sentry/api/endpoints/test_organization_metric_data.py
91,826
feat(snuba): Inject meta into snuba results converter [TET-131] (#35675) * feat(snuba): Inject meta into snuba results converter [TET-131] Add meta data in the resulting response from get_series as we are just discarding it and returning the coalesced data portion. Fixes [TET-131]
sentry
12
Python
32
test_organization_metric_data.py
def test_private_transactions_derived_metric(self):
    response = self.get_response(
        self.organization.slug,
        project=[self.project.id],
        field=["transaction.all"],
        statsPeriod="1m",
        interval="1m",
    )
    assert response.data["detail"] == (
        "Failed to parse 'transaction.all'. Must be something like 'sum(my_metric)', "
        "or a supported aggregate derived metric like `session.crash_free_rate`"
    )
350ecb60d81a26ba63614fb4c87448cfeaceac7c
54
https://github.com/getsentry/sentry.git
138
def test_private_transactions_derived_metric(self): response = self.get_response( self.organization.slug, project=[self.project.id], field=["transaction.all"],
12
91
test_private_transactions_derived_metric
271
0
10
47
keras/engine/training.py
271,601
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
16
Python
175
training.py
def from_config(cls, config, custom_objects=None): # `from_config` assumes `cls` is either `Functional` or a child class of # `Functional`. In the case that `cls` is meant to behave like a child class # of `Functional` but only inherits from the `Model` class, we have to call # `cls(...)` instead of `Functional.from_config`. from keras.engine import ( functional, ) # pylint: disable=g-import-not-at-top with generic_utils.SharedObjectLoadingScope(): functional_model_keys = [ "name", "layers", "input_layers", "output_layers", ] if all(key in config for key in functional_model_keys): inputs, outputs, layers = functional.reconstruct_from_config( config, custom_objects ) model = cls( inputs=inputs, outputs=outputs, name=config.get("name") ) functional.connect_ancillary_layers(model, layers) return model # The config does not contain all the information necessary to revive a # Functional model. This happens when the user creates subclassed models # where `get_config()` is returning insufficient information to be # considered a Functional model. In this case, we fall back to provide # all config into the constructor of the class. optimizer, loss = None, None optimizer_dict = config.pop("optimizer", {}) if optimizer_dict: optimizer = saving_lib.deserialize_keras_object(optimizer_dict) loss_dict = config.pop("loss", {}) if loss_dict: loss = saving_lib.deserialize_keras_object(loss_dict) input_shape = config.pop("input_shape", {}) try: model = cls(**config) except TypeError as e: raise TypeError( "Unable to revive model from config. When overriding " "the `get_config()`, make sure that the returned " "config contains all items used as arguments in the " f"constructor to {cls}, which is the default behavior. " "You can override this default behavior by defining a " "`from_config` method to specify how to create an " f"instance of {cls.__name__} from the config. \n\n" f"Error encountered during deserialization:\n{e}" ) if saving_lib._ENABLED: # pylint: disable=protected-access if optimizer or loss: model.compile(optimizer=optimizer, loss=loss) if input_shape: model.build(input_shape) return model
84afc5193d38057e2e2badf9c889ea87d80d8fbf
220
https://github.com/keras-team/keras.git
1,013
def from_config(cls, config, custom_objects=None): # `from_config` assumes `cls` is either `Functional` or a child class of # `Functional`. In the case that `cls` is meant to behave like a child class # of `Functional` but only inherits from the `Model` class, we have to call # `cls(...)` instead of `Functional.from_config`. from keras.engine import ( functional, ) # pylint: disable=g-import-not-at-top with generic_utils.SharedObjectLoadingScope(): functional_model_keys = [ "name", "layers", "input_
34
393
from_config
22
0
2
10
yt_dlp/extractor/prx.py
162,344
[PRX] Add Extractors (#2245) Closes #2144, https://github.com/ytdl-org/youtube-dl/issues/15948 Authored by: coletdjnz
yt-dlp
12
Python
20
prx.py
def _story_playlist_entry(self, response):
    story = self._extract_story_info(response)
    if not story:
        return
    story.update({
        '_type': 'url',
        'url': 'https://beta.prx.org/stories/%s' % story['id'],
        'ie_key': PRXStoryIE.ie_key()
    })
    return story
85fee2215295b099d34350d9a9ff42c086e3aef2
49
https://github.com/yt-dlp/yt-dlp.git
100
def _story_playlist_entry(self, response): story = self._extract_story_info(response) if not story: return story.update({
8
88
_story_playlist_entry
23
0
1
8
pipenv/patched/notpip/_vendor/rich/_win32_console.py
21,558
Vendor in pip 22.1.2
pipenv
8
Python
21
_win32_console.py
def erase_start_of_line(self) -> None:
    row, col = self.cursor_position
    start = WindowsCoordinates(row, 0)
    FillConsoleOutputCharacter(self._handle, " ", length=col, start=start)
    FillConsoleOutputAttribute(
        self._handle, self._default_attrs, length=col, start=start
    )
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
57
https://github.com/pypa/pipenv.git
76
def erase_start_of_line(self) -> None: row, col = self.cursor_
12
88
erase_start_of_line
15
0
2
5
netbox/dcim/filtersets.py
265,603
#8580 add interface filters for connected
netbox
13
Python
14
filtersets.py
def filter_is_occupied(self, queryset, name, value):
    if value:
        return queryset.filter(Q(cable__isnull=False) | Q(mark_connected=True))
    else:
        return queryset.filter(cable__isnull=True, mark_connected=False)
b4877e7fac49282a766ebcdd2f886f71e8d61fa5
48
https://github.com/netbox-community/netbox.git
50
def filter_is_occupied(self, queryset, name, value): if value: return queryset.filter(Q(cable__isnull=False) | Q(mark_connected=True)) else:
9
73
filter_is_occupied
71
0
1
40
test/test_components.py
181,412
LinePlot (#2807) * First draft * Fix tests * Fix pdb * Figure out stroke dash + legend position * Add legend position * Add back gif * Add demo + update demo * Format notebooks * Changelog * More changelog * Undo notebooks * Reword * Set lower bound for altair * Modify docstrings * Add LinePlot image to changelog
gradio
11
Python
43
test_components.py
def test_legend_position(self): plot = gr.ScatterPlot( show_label=False, title="Two encodings", x="Horsepower", y="Miles_per_Gallon", color="Acceleration", color_legend_position="none", color_legend_title="Foo", shape="Origin", shape_legend_position="none", shape_legend_title="Bar", size="Acceleration", size_legend_title="Accel", size_legend_position="none", ) output = plot.postprocess(cars) config = json.loads(output["plot"]) assert config["encoding"]["color"]["legend"] is None assert config["encoding"]["shape"]["legend"] is None assert config["encoding"]["size"]["legend"] is None output = gr.ScatterPlot.update( value=cars, title="Two encodings", x="Horsepower", y="Miles_per_Gallon", color="Acceleration", color_legend_position="top", color_legend_title="Foo", shape="Origin", shape_legend_position="bottom", shape_legend_title="Bar", size="Acceleration", size_legend_title="Accel", size_legend_position="left", ) config = json.loads(output["value"]["plot"]) assert config["encoding"]["color"]["legend"]["orient"] == "top" assert config["encoding"]["shape"]["legend"]["orient"] == "bottom" assert config["encoding"]["size"]["legend"]["orient"] == "left"
96297c0bad09ee82e65d56a53f96ee9814bb8360
245
https://github.com/gradio-app/gradio.git
447
def test_legend_position(self): plot = gr.ScatterPlot( show_label=False, title="Two encodings", x="Horsepower", y="Miles_per_Gallon", color="Acceleration", color_legend_position="none", color_legend_title="Foo", shape="Origin", shape_legend_position="none", shape_legend_title="Bar", size="Acceleration", size_legend_title="Accel", size_legend_position="none", ) output = plot.postprocess(cars) config = json.loads(output["plot"]) assert config["encoding"]["color"]["legend"] is None assert config["encoding"]["shape"]["legend"] is None assert config["encoding"]["size"]["legend"] is None output = gr.ScatterPlot.update( value=cars, title="Two encodings", x="Horsepower", y="Miles_per
26
439
test_legend_position