Dataset columns (name, dtype, and min/max across the dataset):

| Column | Type | Min | Max |
|---|---|---|---|
| n_words | int64 | 3 | 1.95k |
| n_ast_errors | int64 | 0 | 2 |
| complexity | int64 | 1 | 151 |
| nloc | int64 | 2 | 546 |
| path | stringlengths | 8 | 125 |
| id | int64 | 280 | 339k |
| commit_message | stringlengths | 3 | 18.1k |
| repo | stringlengths | 3 | 28 |
| ast_levels | int64 | 4 | 28 |
| language | stringclasses | 1 value | - |
| vocab_size | int64 | 3 | 677 |
| file_name | stringlengths | 5 | 67 |
| code | stringlengths | 101 | 24k |
| commit_id | stringlengths | 40 | 40 |
| ast_errors | stringlengths | 0 | 2.76k |
| token_counts | int64 | 7 | 3.77k |
| url | stringlengths | 31 | 61 |
| n_whitespaces | int64 | 4 | 13.9k |
| random_cut | stringlengths | 21 | 13.9k |
| n_identifiers | int64 | 1 | 157 |
| n_ast_nodes | int64 | 10 | 3.6k |
| fun_name | stringlengths | 3 | 72 |

The sample records below list one row per block with each field labeled; multi-line fields (`commit_message`, `code`, `random_cut`) keep their original content, and `random_cut` is truncated by construction. A minimal loading sketch follows the schema table.
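For working with a table like this programmatically, here is a minimal sketch using the Hugging Face `datasets` library. The dataset identifier `user/python-code-metrics` is a hypothetical placeholder (the real repository ID is not given in this excerpt); the column names come from the schema above, and `split="train"` is an assumption.

```python
# Minimal sketch: load the dataset and filter on the schema columns above.
# NOTE: "user/python-code-metrics" is a hypothetical placeholder ID; replace it
# with the actual dataset repository name, and adjust the split if needed.
from datasets import load_dataset

ds = load_dataset("user/python-code-metrics", split="train")

# Keep small, low-complexity Python functions whose AST parsed cleanly.
simple = ds.filter(
    lambda row: row["language"] == "Python"
    and row["n_ast_errors"] == 0
    and row["complexity"] <= 5
    and row["nloc"] <= 20
)

# Inspect a few samples: repo, file path, function name, and a code preview.
for row in simple.select(range(3)):
    print(row["repo"], row["path"], row["fun_name"])
    print(row["code"][:200])  # first 200 characters of the function body
```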
n_words: 87 | n_ast_errors: 0 | complexity: 2 | nloc: 17 | path: python/ray/autoscaler/_private/gcp/config.py | id: 130,477
commit_message: [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes.
repo: ray | ast_levels: 10 | language: Python | vocab_size: 60 | file_name: config.py
code:
def _configure_project(config, crm):
config = copy.deepcopy(config)
project_id = config["provider"].get("project_id")
assert config["provider"]["project_id"] is not None, (
"'project_id' must be set in the 'provider' section of the autoscaler"
" config. Notice that the project id must be globally unique."
)
project = _get_project(project_id, crm)
if project is None:
# Project not found, try creating it
_create_project(project_id, crm)
project = _get_project(project_id, crm)
assert project is not None, "Failed to create project"
assert (
project["lifecycleState"] == "ACTIVE"
), "Project status needs to be ACTIVE, got {}".format(project["lifecycleState"])
config["provider"]["project_id"] = project["projectId"]
return config
commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ast_errors: (empty) | token_counts: 110 | url: https://github.com/ray-project/ray.git | n_whitespaces: 166
random_cut:
def _configure_project(config, crm):
config = copy.deepcopy(config)
project_id = config["provider"].get("project_id")
assert config["provider"]["project_id"] is not None, (
"'project_id' must be set in the 'provider' section of the autoscaler"
" config. Notice that the project id must be globally unique."
)
project = _get_project(project_id, crm)
if project is None:
# Project not found, try creating it
_create_project(project_id, crm)
project = _get_project(project_id, crm)
    assert proje
n_identifiers: 11 | n_ast_nodes: 193 | fun_name: _configure_project

n_words: 31 | n_ast_errors: 0 | complexity: 1 | nloc: 11 | path: test/mitmproxy/addons/test_intercept.py | id: 252,338
commit_message: Add support for raw UDP. (#5414)
repo: mitmproxy | ast_levels: 11 | language: Python | vocab_size: 21 | file_name: test_intercept.py
code:
async def test_udp():
r = intercept.Intercept()
with taddons.context(r) as tctx:
tctx.configure(r, intercept="~udp")
f = tflow.tudpflow()
await tctx.cycle(r, f)
assert f.intercepted
tctx.configure(r, intercept_active=False)
f = tflow.tudpflow()
await tctx.cycle(r, f)
assert not f.intercepted
commit_id: cd4a74fae7cbd8119afc3900597f798ec1604db7 | ast_errors: (empty) | token_counts: 82 | url: https://github.com/mitmproxy/mitmproxy.git | n_whitespaces: 92
random_cut:
async def test_udp():
r = intercept.Intercept()
with taddons.context(r) as tctx:
tctx.configure(r, intercept="~udp")
f = tflow.tudpflow()
await tctx.cycle(r, f)
assert f.intercepted
tctx.configure(r, intercept_active=False)
        f = tflow
n_identifiers: 14 | n_ast_nodes: 138 | fun_name: test_udp

n_words: 76 | n_ast_errors: 0 | complexity: 3 | nloc: 6 | path: Tests/test_imagemath.py | id: 243,517
commit_message: some pylint warnings
Fixed some pylint issues
repo: Pillow | ast_levels: 14 | language: Python | vocab_size: 53 | file_name: test_imagemath.py
code:
def pixel(im):
if hasattr(im, "im"):
return f"{im.mode} {repr(im.getpixel((0, 0)))}"
if isinstance(im, int):
return int(im) # hack to deal with booleans
print(im)
A = Image.new("L", (1, 1), 1)
B = Image.new("L", (1, 1), 2)
Z = Image.new("L", (1, 1), 0) # Z for zero
F = Image.new("F", (1, 1), 3)
I = Image.new("I", (1, 1), 4) # noqa: E741
A2 = A.resize((2, 2))
B2 = B.resize((2, 2))
images = {"A": A, "B": B, "F": F, "I": I}
commit_id: 69baeccf2ee7850ccfb9b2b05ab584b87ad50fe1 | ast_errors: (empty) | token_counts: 33 | url: https://github.com/python-pillow/Pillow.git | n_whitespaces: 93
random_cut:
def pixel(im):
if hasattr(im, "im"):
return f"{im.mode} {repr(im.getpixel((0, 0)))}"
if isinstance(im, int):
return int(im) # hack to deal with booleans
print(im)
A = Image.new("L", (1, 1), 1)
B = Image.new("L", (1, 1), 2)
Z = Image.new("L", (1, 1), 0) # Z for zero
F = Image.new("F", (1, 1), 3)
I = Image.new("I", (1, 1), 4) # noqa: E741
A2 = A.resize((2, 2))
B2 = B.resize((2, 2))
images = {"A": A, "B": B, "F": F, "I": I | 20 | 277 | pixel |
|
n_words: 26 | n_ast_errors: 0 | complexity: 1 | nloc: 8 | path: test/mitmproxy/test_http.py | id: 252,006
commit_message: make it black!
repo: mitmproxy | ast_levels: 13 | language: Python | vocab_size: 19 | file_name: test_http.py
code:
def test_get_multipart_form(self):
request = treq(content=b"foobar")
assert not request.multipart_form
request.headers["Content-Type"] = "multipart/form-data"
assert list(request.multipart_form.items()) == []
with mock.patch("mitmproxy.net.http.multipart.decode") as m:
m.side_effect = ValueError
assert list(request.multipart_form.items()) == []
commit_id: b3587b52b25077f68116b9852b041d33e7fc6601 | ast_errors: (empty) | token_counts: 70 | url: https://github.com/mitmproxy/mitmproxy.git | n_whitespaces: 82
random_cut:
def test_get_multipart_form(self):
n_identifiers: 14 | n_ast_nodes: 123 | fun_name: test_get_multipart_form

n_words: 9 | n_ast_errors: 0 | complexity: 1 | nloc: 2 | path: tests/sentry/helpers/test_deprecation.py | id: 90,595
commit_message: feat(api): Add initial skeleton for deprecation decorator (#34980)
* feat(api): Add initial skeleton for deprecation decorator
The decorator will eventually handle all the overhead for a deprecated endpoint including
* Metric tracking
* Brownout
* Deprecation Headers
Later PRs will introduce crontab logic for brownout periods and unit tests
* Added a header for suggested API and an exit for self hosted
* Added the base for the deprecation test
* Added unit tests, but am running into issues with 403s
* tell pytest to ignore the endpoint
* Remove test prefix
* Got tests passing and handled time boundary corner case
* typo
repo: sentry | ast_levels: 10 | language: Python | vocab_size: 9 | file_name: test_deprecation.py
code:
def head(self, request):
return Response({"ok": True})
dummy_endpoint = DummyEndpoint.as_view()
commit_id: a7f6111fcf15ef09696991027e473dc42520dadc | ast_errors: (empty) | token_counts: 16 | url: https://github.com/getsentry/sentry.git | n_whitespaces: 14
random_cut:
def head(self, request):
        return Re
n_identifiers: 7 | n_ast_nodes: 39 | fun_name: head

n_words: 32 | n_ast_errors: 0 | complexity: 2 | nloc: 13 | path: python/ray/workflow/workflow_storage.py | id: 133,510
commit_message: [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes.
repo: ray | ast_levels: 14 | language: Python | vocab_size: 31 | file_name: workflow_storage.py
code:
def get_entrypoint_step_id(self) -> StepID:
# empty StepID represents the workflow driver
try:
return asyncio_run(self._locate_output_step_id(""))
except Exception as e:
raise ValueError(
"Fail to get entrypoint step ID from workflow"
f"[id={self._workflow_id}]"
) from e
commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ast_errors: (empty) | token_counts: 34 | url: https://github.com/ray-project/ray.git | n_whitespaces: 123
random_cut:
def get_entrypoint_step_id(self) -> StepID:
# empty StepID represents the workflow driver
try:
n_identifiers: 9 | n_ast_nodes: 71 | fun_name: get_entrypoint_step_id

n_words: 47 | n_ast_errors: 0 | complexity: 3 | nloc: 14 | path: lib/matplotlib/figure.py | id: 110,070
commit_message: Emit "axes not compatible with tight_layout" in a single place.
... instead of triplicating it on the caller side.
repo: matplotlib | ast_levels: 10 | language: Python | vocab_size: 43 | file_name: figure.py
code:
def tight_layout(self, *, pad=1.08, h_pad=None, w_pad=None, rect=None):
# note that here we do not permanently set the figures engine to
# tight_layout but rather just perform the layout in place and remove
# any previous engines.
engine = TightLayoutEngine(pad=pad, h_pad=h_pad, w_pad=w_pad,
rect=rect)
try:
self.set_layout_engine(engine)
engine.execute(self)
finally:
self.set_layout_engine(None)
commit_id: 7d2503b422f98686bef42e7caebe025540ca6aaa | ast_errors: (empty) | token_counts: 94 | url: https://github.com/matplotlib/matplotlib.git | n_whitespaces: 163
random_cut:
def tight_layout(self, *, pad=1.08, h_pad=None, w_pad=None, rect=None):
# note that here we do not permanently set the figures engine to
# tight_layout but rather just perform the layout in place and remove
# any previous engines.
    engine = TightLayoutEngine(pad=pad, h_pad=h_pad, w_pad=w_pad
n_identifiers: 10 | n_ast_nodes: 105 | fun_name: tight_layout

n_words: 115 | n_ast_errors: 0 | complexity: 2 | nloc: 28 | path: tests/maskformer/test_modeling_maskformer.py | id: 35,869
commit_message: Maskformer (#15682)
* maskformer
* conflicts
* conflicts
* minor fixes
* feature extractor test fix
refactor MaskFormerLoss following conversation
MaskFormer related types should not trigger a module time import error
missed one
removed all the types that are not used
update config mapping
minor updates in the doc
resolved conversation that doesn't need a discussion
minor changes
resolved conversations
fixed DetrDecoder
* minor changes
minor changes
fixed mdx file
test feature_extractor return types
functional losses -> classes
removed the return type test for the feature extractor
minor changes + style + quality
* conflicts?
* rebase master
* readme
* added missing files
* deleded poolformers test that where in the wrong palce
* CI
* minor changes
* Apply suggestions from code review
Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>
* resolved conversations
* minor changes
* conversations
[Unispeech] Fix slow tests (#15818)
* remove soundfile old way of loading audio
* Adapt slow test
[Barthez Tokenizer] Fix saving (#15815)
[TFXLNet] Correct tf xlnet generate (#15822)
* [TFXLNet] Correct tf xlnet
* adapt test comment
Fix the push run (#15807)
Fix semantic segmentation pipeline test (#15826)
Fix dummy_inputs() to dummy_inputs in symbolic_trace doc (#15776)
Add model specific output classes to PoolFormer model docs (#15746)
* Added model specific output classes to poolformer docs
* Fixed Segformer typo in Poolformer docs
Adding the option to return_timestamps on pure CTC ASR models. (#15792)
* Adding the option to return_timestamps on pure CTC ASR models.
* Remove `math.prod` which was introduced in Python 3.8
* int are not floats.
* Reworking the PR to support "char" vs "word" output.
* Fixup!
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
* Quality.
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
HFTracer.trace should use/return self.graph to be compatible with torch.fx.Tracer (#15824)
Fix tf.concatenate + test past_key_values for TF models (#15774)
* fix wrong method name tf.concatenate
* add tests related to causal LM / decoder
* make style and quality
* clean-up
* Fix TFBertModel's extended_attention_mask when past_key_values is provided
* Fix tests
* fix copies
* More tf.int8 -> tf.int32 in TF test template
* clean-up
* Update TF test template
* revert the previous commit + update the TF test template
* Fix TF template extended_attention_mask when past_key_values is provided
* Fix some styles manually
* clean-up
* Fix ValueError: too many values to unpack in the test
* Fix more: too many values to unpack in the test
* Add a comment for extended_attention_mask when there is past_key_values
* Fix TFElectra extended_attention_mask when past_key_values is provided
* Add tests to other TF models
* Fix for TF Electra test: add prepare_config_and_inputs_for_decoder
* Fix not passing training arg to lm_head in TFRobertaForCausalLM
* Fix tests (with past) for TF Roberta
* add testing for pask_key_values for TFElectra model
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
[examples/summarization and translation] fix readme (#15833)
Add ONNX Runtime quantization for text classification notebook (#15817)
Re-enable doctests for the quicktour (#15828)
* Re-enable doctests for the quicktour
* Re-enable doctests for task_summary (#15830)
* Remove &
Framework split model report (#15825)
Add TFConvNextModel (#15750)
* feat: initial implementation of convnext in tensorflow.
* fix: sample code for the classification model.
* chore: added checked for from the classification model.
* chore: set bias initializer in the classification head.
* chore: updated license terms.
* chore: removed ununsed imports
* feat: enabled argument during using drop_path.
* chore: replaced tf.identity with layers.Activation(linear).
* chore: edited default checkpoint.
* fix: minor bugs in the initializations.
* partial-fix: tf model errors for loading pretrained pt weights.
* partial-fix: call method updated
* partial-fix: cross loading of weights (4x3 variables to be matched)
* chore: removed unneeded comment.
* removed playground.py
* rebasing
* rebasing and removing playground.py.
* fix: renaming TFConvNextStage conv and layer norm layers
* chore: added initializers and other minor additions.
* chore: added initializers and other minor additions.
* add: tests for convnext.
* fix: integration tester class.
* fix: issues mentioned in pr feedback (round 1).
* fix: how output_hidden_states arg is propoagated inside the network.
* feat: handling of arg for pure cnn models.
* chore: added a note on equal contribution in model docs.
* rebasing
* rebasing and removing playground.py.
* feat: encapsulation for the convnext trunk.
* Fix variable naming; Test-related corrections; Run make fixup
* chore: added Joao as a contributor to convnext.
* rebasing
* rebasing and removing playground.py.
* rebasing
* rebasing and removing playground.py.
* chore: corrected copyright year and added comment on NHWC.
* chore: fixed the black version and ran formatting.
* chore: ran make style.
* chore: removed from_pt argument from test, ran make style.
* rebasing
* rebasing and removing playground.py.
* rebasing
* rebasing and removing playground.py.
* fix: tests in the convnext subclass, ran make style.
* rebasing
* rebasing and removing playground.py.
* rebasing
* rebasing and removing playground.py.
* chore: moved convnext test to the correct location
* fix: locations for the test file of convnext.
* fix: convnext tests.
* chore: applied sgugger's suggestion for dealing w/ output_attentions.
* chore: added comments.
* chore: applied updated quality enviornment style.
* chore: applied formatting with quality enviornment.
* chore: revert to the previous tests/test_modeling_common.py.
* chore: revert to the original test_modeling_common.py
* chore: revert to previous states for test_modeling_tf_common.py and modeling_tf_utils.py
* fix: tests for convnext.
* chore: removed output_attentions argument from convnext config.
* chore: revert to the earlier tf utils.
* fix: output shapes of the hidden states
* chore: removed unnecessary comment
* chore: reverting to the right test_modeling_tf_common.py.
* Styling nits
Co-authored-by: ariG23498 <aritra.born2fly@gmail.com>
Co-authored-by: Joao Gante <joao@huggingface.co>
Co-authored-by: Sylvain Gugger <Sylvain.gugger@gmail.com>
* minor changes
* doc fix in feature extractor
* doc
* typose
* removed detr logic from config
* removed detr logic from config
* removed num_labels
* small fix in the config
* auxilary -> auxiliary
* make style
* some test is failing
* fix a weird char in config prevending doc-builder
* retry to fix the doc-builder issue
* make style
* new try to fix the doc builder
* CI
* change weights to facebook
Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>
Co-authored-by: ariG23498 <aritra.born2fly@gmail.com>
Co-authored-by: Joao Gante <joao@huggingface.co>
Co-authored-by: Sylvain Gugger <Sylvain.gugger@gmail.com>
repo: transformers | ast_levels: 13 | language: Python | vocab_size: 84 | file_name: test_modeling_maskformer.py
code:
def test_inference_instance_segmentation_head(self):
model = MaskFormerForInstanceSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
feature_extractor = self.default_feature_extractor
image = prepare_img()
inputs = feature_extractor(image, return_tensors="pt").to(torch_device)
inputs_shape = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(inputs_shape, (1, 3, 800, 1088))
with torch.no_grad():
outputs = model(**inputs)
# masks_queries_logits
masks_queries_logits = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
)
expected_slice = torch.tensor(
[[-1.3738, -1.7725, -1.9365], [-1.5978, -1.9869, -2.1524], [-1.5796, -1.9271, -2.0940]]
)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
# class_queries_logits
class_queries_logits = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
expected_slice = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
]
)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
commit_id: d83d22f578276e9f201b0b3b0f8f9bd68e86c133 | ast_errors: (empty) | token_counts: 351 | url: https://github.com/huggingface/transformers.git | n_whitespaces: 375
random_cut:
def test_inference_instance_segmentation_head(self):
    mo
n_identifiers: 32 | n_ast_nodes: 483 | fun_name: test_inference_instance_segmentation_head

n_words: 28 | n_ast_errors: 0 | complexity: 1 | nloc: 13 | path: chart/tests/test_migrate_database_job.py | id: 45,131
commit_message: Add support for custom command and args in jobs (#20864)
repo: airflow | ast_levels: 12 | language: Python | vocab_size: 25 | file_name: test_migrate_database_job.py
code:
def test_default_command_and_args_airflow_version(self, airflow_version, expected_arg):
docs = render_chart(
values={
"airflowVersion": airflow_version,
},
show_only=["templates/jobs/migrate-database-job.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].command", docs[0]) is None
assert [
"bash",
"-c",
f"exec \\\n{expected_arg}",
] == jmespath.search("spec.template.spec.containers[0].args", docs[0])
commit_id: 4e17528fd2ed69020c3cdc6672e3093254f1477f | ast_errors: (empty) | token_counts: 65 | url: https://github.com/apache/airflow.git | n_whitespaces: 143
random_cut:
def test_default_command_and_args_airflow_version(self, airflow_version, expected_arg):
docs = render_chart(
values={
"airflowVersion": airflow_version,
},
show_only=["templates/jobs/ | 10 | 110 | test_default_command_and_args_airflow_version |
|
n_words: 11 | n_ast_errors: 0 | complexity: 1 | nloc: 3 | path: .venv/lib/python3.8/site-packages/pip/_internal/cli/parser.py | id: 60,532
commit_message: upd; format
repo: transferlearning | ast_levels: 9 | language: Python | vocab_size: 11 | file_name: parser.py
code:
def error(self, msg):
# type: (str) -> None
self.print_usage(sys.stderr)
self.exit(UNKNOWN_ERROR, f"{msg}\n")
commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | ast_errors: (empty) | token_counts: 24 | url: https://github.com/jindongwang/transferlearning.git | n_whitespaces: 31
random_cut:
def error(self, msg):
# type: (str) -> None
n_identifiers: 8 | n_ast_nodes: 44 | fun_name: error

n_words: 29 | n_ast_errors: 0 | complexity: 1 | nloc: 13 | path: wagtail/contrib/redirects/tests/test_redirects.py | id: 73,384
commit_message: Reformat with black
repo: wagtail | ast_levels: 11 | language: Python | vocab_size: 28 | file_name: test_redirects.py
code:
def test_edit_duplicate(self):
models.Redirect.objects.create(
old_path="/othertest", site=None, redirect_link="http://elsewhere.com/"
)
response = self.post(
{
"old_path": "/othertest",
"is_permanent": "on",
"site": "",
"redirect_link": "http://www.test.com/ive-been-edited",
}
)
# Should not redirect to index
self.assertEqual(response.status_code, 200)
commit_id: d10f15e55806c6944827d801cd9c2d53f5da4186 | ast_errors: (empty) | token_counts: 60 | url: https://github.com/wagtail/wagtail.git | n_whitespaces: 163
random_cut:
def test_edit_duplicate(self):
models.Redirect.objects.create(
old_path="/othertest", site=None, redirect_link="h | 13 | 109 | test_edit_duplicate |
|
n_words: 79 | n_ast_errors: 0 | complexity: 9 | nloc: 28 | path: yt_dlp/extractor/instagram.py | id: 162,503
commit_message: [instagram] Fix bug in 013322a95e396ab21c8febc3e560d8a177c87f4a
Closes #2552
repo: yt-dlp | ast_levels: 13 | language: Python | vocab_size: 58 | file_name: instagram.py
code:
def _extract_product_media(self, product_media):
media_id = product_media.get('code') or product_media.get('id')
vcodec = product_media.get('video_codec')
dash_manifest_raw = product_media.get('video_dash_manifest')
videos_list = product_media.get('video_versions')
if not (dash_manifest_raw or videos_list):
return {}
formats = [{
'format_id': format.get('id'),
'url': format.get('url'),
'width': format.get('width'),
'height': format.get('height'),
'vcodec': vcodec,
} for format in videos_list or []]
if dash_manifest_raw:
formats.extend(self._parse_mpd_formats(self._parse_xml(dash_manifest_raw, media_id), mpd_id='dash'))
self._sort_formats(formats)
thumbnails = [{
'url': thumbnail.get('url'),
'width': thumbnail.get('width'),
'height': thumbnail.get('height')
} for thumbnail in traverse_obj(product_media, ('image_versions2', 'candidates')) or []]
return {
'id': media_id,
'duration': float_or_none(product_media.get('video_duration')),
'formats': formats,
'thumbnails': thumbnails
}
commit_id: c5332d7fbb654a7127aeb080b91f8e85b48796b4 | ast_errors: (empty) | token_counts: 215 | url: https://github.com/yt-dlp/yt-dlp.git | n_whitespaces: 323
random_cut:
def _extract_product_media(self, product_media):
media_id = product_media.get('code') or product_media.get('id')
vcodec = product_media.get('video_codec')
dash_manifest_raw = product_media.get('video_dash_manifest')
videos_list = product_media.get('video_versions')
n_identifiers: 19 | n_ast_nodes: 380 | fun_name: _extract_product_media

n_words: 25 | n_ast_errors: 0 | complexity: 3 | nloc: 9 | path: wagtail/contrib/forms/views.py | id: 76,935
commit_message: Initialize locale attribute in SafePaginateListView.__init__
repo: wagtail | ast_levels: 15 | language: Python | vocab_size: 21 | file_name: views.py
code:
def get(self, request, *args, **kwargs):
if getattr(settings, "WAGTAIL_I18N_ENABLED", False):
if request.GET.get("locale"):
self.locale = get_object_or_404(
Locale, language_code=request.GET["locale"]
)
else:
self.locale = Locale.get_default()
return super().get(request, *args, **kwargs)
commit_id: e777c22d70191382668efeb04981b4b4acb29905 | ast_errors: (empty) | token_counts: 76 | url: https://github.com/wagtail/wagtail.git | n_whitespaces: 124
random_cut:
def get(self, request, *args, **kwargs):
if getattr(settings, "WAGTAIL_I18N_ENABLED", False):
if request.GET.get("locale"):
self.locale = get_object_or_404(
Locale, language_code=request.GET["locale"]
)
else:
self.locale = Locale.get_default()
        r
n_identifiers: 14 | n_ast_nodes: 122 | fun_name: get

n_words: 87 | n_ast_errors: 0 | complexity: 6 | nloc: 10 | path: src/prefect/orion/schemas/schedules.py | id: 58,128
commit_message: Ensure that UTC offsets dont get parsed as timezones (PrefectHQ/orion#2551)
repo: prefect | ast_levels: 13 | language: Python | vocab_size: 67 | file_name: schedules.py
code:
def default_timezone(cls, v, *, values, **kwargs):
# if was provided, make sure its a valid IANA string
if v and v not in pendulum.tz.timezones:
raise ValueError(f'Invalid timezone: "{v}"')
# otherwise infer the timezone from the anchor date
elif v is None and values.get("anchor_date"):
tz = values["anchor_date"].tz.name
if tz in pendulum.tz.timezones:
return tz
# sometimes anchor dates have "timezones" that are UTC offsets
# like "-04:00". This happens when parsing ISO8601 strings.
# In this case we, the correct inferred localization is "UTC".
else:
return "UTC"
return v
commit_id: 076fb7d6472874eeec670239590c78a763e0f72d | ast_errors: (empty) | token_counts: 71 | url: https://github.com/PrefectHQ/prefect.git | n_whitespaces: 228
random_cut:
def default_timezone(cls, v, *, values, **kwargs):
# if was provided, make sure its a valid IANA string
if v and v not in pendulum.tz.timezones:
raise ValueError(f'Invalid timezone: "{v}"')
# otherwise infer the timezone from the anchor date
elif v is None and values.get("anchor_date"):
tz = values["anchor_date"].tz.name
if tz in pendulum.tz.timezones:
return tz
            # sometimes anchor dates have "timezones" that
n_identifiers: 11 | n_ast_nodes: 124 | fun_name: default_timezone

n_words: 16 | n_ast_errors: 0 | complexity: 1 | nloc: 7 | path: modules/image/classification/mobilenet_v2_dishes/test.py | id: 51,475
commit_message: update mobilenet_v2_dishes (#2018)
repo: PaddleHub | ast_levels: 13 | language: Python | vocab_size: 15 | file_name: test.py
code:
def test_classification2(self):
results = self.module.classification(
images=[cv2.imread('tests/test.jpg')]
)
data = results[0]
self.assertTrue('海鲜面' in data)
self.assertTrue(data['海鲜面'] > 0.01)
commit_id: 94949b0e9120b4bca5888a4d19ff9759a05dd54f | ast_errors: (empty) | token_counts: 51 | url: https://github.com/PaddlePaddle/PaddleHub.git | n_whitespaces: 61
random_cut:
def test_classification2(self):
n_identifiers: 10 | n_ast_nodes: 84 | fun_name: test_classification2

n_words: 61 | n_ast_errors: 0 | complexity: 3 | nloc: 16 | path: homeassistant/components/google/calendar.py | id: 296,766
commit_message: Move google calendar integration to aiohttp (#70173)
* Use new aiohttp based google client library in gcal_sync.
* Use base url in tests for shorter string
* Remove unnecessary line of code
* Jump to gcal-sync-0.4.1
* Update tests/components/google/conftest.py
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Update to gcal_sync 0.5.0 incorporating PR feedback
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
repo: core | ast_levels: 12 | language: Python | vocab_size: 50 | file_name: calendar.py
code:
async def async_update(self) -> None:
request = ListEventsRequest(calendar_id=self._calendar_id, search=self._search)
try:
result = await self._calendar_service.async_list_events(request)
except ApiException as err:
_LOGGER.error("Unable to connect to Google: %s", err)
return
# Pick the first visible event and apply offset calculations.
valid_items = filter(self._event_filter, result.items)
event = copy.deepcopy(next(valid_items, None))
if event:
(event.summary, offset) = extract_offset(event.summary, self._offset)
self._event = _get_calendar_event(event)
self._offset_value = offset
else:
self._event = None
| 0e0c0ce22b0a87e0c709abab2091dc5bfddb42bb | 117 | https://github.com/home-assistant/core.git | 201 | async def async_update(self) -> None:
request = ListEventsRequest(calendar_id=self._calendar_id, search=self._search)
try:
result = await self._calendar_service.async_list_events(request)
except ApiException as err:
_LOGGER.error("Unable to connect to Google: %s", err)
return
# Pick the first visible event and apply offset calculations.
valid_items = filter(self._event_filter, result.items)
        event
n_identifiers: 30 | n_ast_nodes: 191 | fun_name: async_update

n_words: 9 | n_ast_errors: 0 | complexity: 1 | nloc: 4 | path: src/prefect/flow_runners.py | id: 53,339
commit_message: Kubernetes flow runner (PrefectHQ/orion#780)
Add a Kubernetes flow runner
repo: prefect | ast_levels: 9 | language: Python | vocab_size: 8 | file_name: flow_runners.py
code:
def _get_environment_variables(self):
env = self.env.copy()
env.setdefault("PREFECT_ORION_HOST", "http://orion:4200/api")
return env
commit_id: be671cbecee46c621dc08ed47bb520f795b34a42 | ast_errors: (empty) | token_counts: 24 | url: https://github.com/PrefectHQ/prefect.git | n_whitespaces: 29
random_cut:
def _get_environment_variables(self):
env = self.env.copy()
env.setdefault("PREFECT_ORION_HOST", "http://orion:4 | 5 | 43 | _get_environment_variables |
|
n_words: 21 | n_ast_errors: 0 | complexity: 4 | nloc: 5 | path: test/prototype_transforms_kernel_infos.py | id: 194,167
commit_message: fix prototype transforms tests with set agg_method (#6934)
* fix prototype transforms tests with set agg_method
* use individual tolerances
* refactor PIL reference test
* increase tolerance for elastic_mask
* fix autocontrast tolerances
* increase tolerance for RandomAutocontrast
repo: vision | ast_levels: 12 | language: Python | vocab_size: 18 | file_name: prototype_transforms_kernel_infos.py
code:
def reference_inputs_convert_color_space_image_tensor():
for args_kwargs in sample_inputs_convert_color_space_image_tensor():
(image_loader, *other_args), kwargs = args_kwargs
if len(image_loader.shape) == 3 and image_loader.dtype == torch.uint8:
yield args_kwargs
commit_id: 65769ab7662263a032a14c77e7b0890abb7c3001 | ast_errors: (empty) | token_counts: 41 | url: https://github.com/pytorch/vision.git | n_whitespaces: 48
random_cut:
def reference_inputs_convert_color_space_image_tensor():
for args_kwargs in sample_inputs_convert_color_space_image_tensor():
(image_loader, *other_args), kwargs = args_kwargs
        if le
n_identifiers: 11 | n_ast_nodes: 66 | fun_name: reference_inputs_convert_color_space_image_tensor

n_words: 37 | n_ast_errors: 0 | complexity: 1 | nloc: 11 | path: spacy/tests/doc/test_json_doc_conversion.py | id: 111,478
commit_message: Support custom attributes for tokens and spans in json conversion (#11125)
* Add token and span custom attributes to to_json()
* Change logic for to_json
* Add functionality to from_json
* Small adjustments
* Move token/span attributes to new dict key
* Fix test
* Fix the same test but much better
* Add backwards compatibility tests and adjust logic
* Add test to check if attributes not set in underscore are not saved in the json
* Add tests for json compatibility
* Adjust test names
* Fix tests and clean up code
* Fix assert json tests
* small adjustment
* adjust naming and code readability
* Adjust naming, added more tests and changed logic
* Fix typo
* Adjust errors, naming, and small test optimization
* Fix byte tests
* Fix bytes tests
* Change naming and json structure
* update schema
* Update spacy/schemas.py
Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com>
* Update spacy/tokens/doc.pyx
Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com>
* Update spacy/tokens/doc.pyx
Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com>
* Update spacy/schemas.py
Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com>
* Update schema for underscore attributes
* Adjust underscore schema
* adjust schema tests
Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com>
repo: spaCy | ast_levels: 11 | language: Python | vocab_size: 24 | file_name: test_json_doc_conversion.py
code:
def test_doc_to_json_with_token_attributes_missing(doc):
Token.set_extension("token_test", default=False)
Span.set_extension("span_test", default=False)
doc[0:1]._.span_test = "span_attribute"
doc[0]._.token_test = 117
json_doc = doc.to_json(underscore=["span_test"])
assert "underscore_token" in json_doc
assert "underscore_span" in json_doc
assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute"
assert "token_test" not in json_doc["underscore_token"]
assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
commit_id: 5afa98aabfc18a23f19b07b13e2cd12ddb6ee009 | ast_errors: (empty) | token_counts: 104 | url: https://github.com/explosion/spaCy.git | n_whitespaces: 66
random_cut:
def test_doc_to_json_with_token_attributes_missing(doc):
Token.set_extension("token_test", default=False)
Span.set_extension("span_test", default=False)
doc[0:1]._.span_test = "span_attribute"
doc[0]._.token_test = 117
json_doc = doc.to_json(underscore=["span_test"])
assert "underscore_token" in json_doc
assert "underscore_span" in json_doc
assert json_doc["underscore_span"]["span_test"]["value | 16 | 180 | test_doc_to_json_with_token_attributes_missing |
|
n_words: 75 | n_ast_errors: 1 | complexity: 1 | nloc: 26 | path: test/test_pipeline.py | id: 256,681
commit_message: Generate code from pipeline (pipeline.to_code()) (#2214)
* pipeline.to_code() with jupyter support
* Update Documentation & Code Style
* add imports
* refactoring
* Update Documentation & Code Style
* docstrings added and refactoring
* Update Documentation & Code Style
* improve imports code generation
* add comment param
* Update Documentation & Code Style
* add simple test
* add to_notebook_cell()
* Update Documentation & Code Style
* introduce helper classes for code gen and eval report gen
* add more tests
* Update Documentation & Code Style
* fix Dict typings
* Update Documentation & Code Style
* validate user input before code gen
* enable urls for to_code()
* Update Documentation & Code Style
* remove all chars except colon from validation regex
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
repo: haystack | ast_levels: 12 | language: Python | vocab_size: 55 | file_name: test_pipeline.py
code:
def test_PipelineCodeGen_dual_retriever_pipeline():
es_doc_store = ElasticsearchDocumentStore(index="my-index")
es_retriever = ElasticsearchRetriever(document_store=es_doc_store, top_k=20)
dense_doc_store = InMemoryDocumentStore(index="my-index")
emb_retriever = EmbeddingRetriever(
document_store=dense_doc_store, embedding_model="sentence-transformers/all-MiniLM-L6-v2"
)
p_ensemble = Pipeline()
p_ensemble.add_node(component=es_retriever, name="EsRetriever", inputs=["Query"])
p_ensemble.add_node(component=emb_retriever, name="EmbeddingRetriever", inputs=["Query"])
p_ensemble.add_node(
component=JoinDocuments(join_mode="merge"), name="JoinResults", inputs=["EsRetriever", "EmbeddingRetriever"]
)
code = _PipelineCodeGen.generate_code(pipeline=p_ensemble, pipeline_variable_name="p", generate_imports=False)
assert code == (
'elasticsearch_document_store = ElasticsearchDocumentStore(index="my-index")\n'
"es_retriever = ElasticsearchRetriever(document_store=elasticsearch_document_store, top_k=20)\n"
'in_memory_document_store = InMemoryDocumentStore(index="my-index")\n'
'embedding_retriever = EmbeddingRetriever(document_store=in_memory_document_store, embedding_model="sentence-transformers/all-MiniLM-L6-v2")\n'
'join_results = JoinDocuments(join_mode="merge")\n'
"\n"
"p = Pipeline()\n"
'p.add_node(component=es_retriever, name="EsRetriever", inputs=["Query"])\n'
'p.add_node(component=embedding_retriever, name="EmbeddingRetriever", inputs=["Query"])\n'
'p.add_node(component=join_results, name="JoinResults", inputs=["EsRetriever", "EmbeddingRetriever"])'
)
@pytest.mark.elasticsearch
commit_id: e20f2e0d541805c3afb1f0948fa85f88b2a4f434 | ast_errors: @pytest.mark.elasticsearch | token_counts: 143 | url: https://github.com/deepset-ai/haystack.git | n_whitespaces: 196
random_cut:
def test_PipelineCodeGen_dual_retriever_pipeline():
es_doc_store = ElasticsearchDocumentStore(index="my-index")
es_retriever = ElasticsearchRetriever(document_store=es_doc_store, top_k=20)
dense_doc_store = InMemoryDocumentStore(index="my-index")
emb_retriever = EmbeddingRetriever(
document_store=dense_doc_store, embedding_model="sentence-transformers/all-MiniLM-L6-v2"
)
p_ensemble = Pipeline()
p_ensemble.add_node(component=es_retriever, name="EsRetriever", inputs=["Query"])
p_ensemble.add_node(component=emb_retriever, name="EmbeddingRetriever", inputs=["Query"])
p_ensemble.add_node(
component=JoinDocuments(join_mode="merge"), name="JoinResults", inputs=["EsRetriever", "EmbeddingRetriever"]
)
code = _PipelineCodeGen.generate_code(pipeline=p_ensemble, pipeline_variable_name="p", generate_imports=False)
assert code == (
'elasticsearch_document_store = ElasticsearchDocumentStore(index="my-index")\n'
"es_retriever = ElasticsearchRetriever(document_store=elasticsearch_document_store, top_k=20)\n"
'in_memory_document_store = InMemoryDocumentStore(index="my-index")\n'
'embedding_retriever = EmbeddingRetriever(document_store=in_memory_document_store, embedding_model="sentence-transformers/all-MiniLM-L6-v2")\n'
'join_results = JoinDocuments(join_mode="merge")\n'
"\n"
"p = Pipeline()\n"
'p.add_node(component=es_retriever, name="EsRetriever", inputs=["Query"])\n'
'p.add_node(component=embedding_retriever, name="EmbeddingRetriever", inputs=["Query"])\n'
        'p.add_node(component=join_results, name="JoinResults", inputs=["EsRetriever
n_identifiers: 30 | n_ast_nodes: 275 | fun_name: test_PipelineCodeGen_dual_retriever_pipeline

n_words: 47 | n_ast_errors: 0 | complexity: 4 | nloc: 10 | path: mindsdb/interfaces/stream/stream.py | id: 114,674
commit_message: fix
repo: mindsdb | ast_levels: 13 | language: Python | vocab_size: 39 | file_name: stream.py
code:
def setup(self, db_alias):
try:
integration = self.integration_controller.get(db_alias)
if integration is None:
raise Exception(f'Unkonw database integration: {db_alias}')
if integration.get('type') not in self.known_dbs:
raise Exception(f'Unkonw database integration type for: {db_alias}')
self.known_dbs[integration['type']](self.config, db_alias, integration).setup()
except Exception as e:
logger.warning('Failed to setup stream for ' + db_alias + f', error: {e}')
commit_id: 2fa2805fd6bbcf3819e8da4f3aba3d4618db082f | ast_errors: (empty) | token_counts: 87 | url: https://github.com/mindsdb/mindsdb.git | n_whitespaces: 145
random_cut:
def setup(self, db_alias):
try:
        integration = self.integration_co
n_identifiers: 12 | n_ast_nodes: 156 | fun_name: setup

n_words: 8 | n_ast_errors: 0 | complexity: 1 | nloc: 4 | path: jina/serve/instrumentation/__init__.py | id: 13,439
commit_message: fix: health check exception with opentelemetry tracing interceptors (#5392)
repo: jina | ast_levels: 8 | language: Python | vocab_size: 8 | file_name: __init__.py
code:
def _new_timer(self):
return self.__class__(
self._summary_metric, self._histogram, self._histogram_metric_labels
)
commit_id: f738d34bfc75437c7203f5746cc602145088d220 | ast_errors: (empty) | token_counts: 22 | url: https://github.com/jina-ai/jina.git | n_whitespaces: 32
random_cut:
def _new_timer(self):
return self.__class__(
        self._summary_metric, self._histogram, self._histo
n_identifiers: 6 | n_ast_nodes: 33 | fun_name: _new_timer

n_words: 17 | n_ast_errors: 0 | complexity: 2 | nloc: 6 | path: python/ray/autoscaler/_private/aliyun/utils.py | id: 130,364
commit_message: [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes.
repo: ray | ast_levels: 12 | language: Python | vocab_size: 14 | file_name: utils.py
code:
def describe_vpcs(self):
request = DescribeVpcsRequest()
response = self._send_request(request)
if response is not None:
return response.get("Vpcs").get("Vpc")
return None
commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ast_errors: (empty) | token_counts: 39 | url: https://github.com/ray-project/ray.git | n_whitespaces: 63
random_cut:
def describe_vpcs(self):
    request
n_identifiers: 7 | n_ast_nodes: 70 | fun_name: describe_vpcs

n_words: 16 | n_ast_errors: 0 | complexity: 1 | nloc: 7 | path: tests/test_signal_handler.py | id: 191,036
commit_message: Reformat to 80 chars and mypy.ini
repo: thumbor | ast_levels: 9 | language: Python | vocab_size: 14 | file_name: test_signal_handler.py
code:
def test_signal_handler_calls_add_callback_from_signal(self, ioloop_mock):
ioloop_instance_mock = mock.Mock()
ioloop_mock.return_value = ioloop_instance_mock
signal_handler(mock.Mock(), mock.Mock(), signal.SIGTERM, mock.Mock())
ioloop_instance_mock.add_callback_from_signal.assert_called_with(
mock.ANY
)
commit_id: 301124c5b377fa56b940d298900dbc5816dbc24e | ast_errors: (empty) | token_counts: 53 | url: https://github.com/thumbor/thumbor.git | n_whitespaces: 61
random_cut:
def test_signal_handler_calls_add_callback_from_signal(self, ioloop_mock):
    ioloop_instance_mo
n_identifiers: 13 | n_ast_nodes: 84 | fun_name: test_signal_handler_calls_add_callback_from_signal

n_words: 47 | n_ast_errors: 0 | complexity: 4 | nloc: 12 | path: freqtrade/exchange/exchange.py | id: 149,003
commit_message: exchange.get_leverage_tiers and exchange.get_market_leverage_tiers
repo: freqtrade | ast_levels: 15 | language: Python | vocab_size: 35 | file_name: exchange.py
code:
def get_market_leverage_tiers(self, symbol) -> List[Dict]:
try:
return self._api.fetch_market_leverage_tiers(symbol)
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
raise TemporaryError(
f'Could not load leverage tiers for {symbol}'
f' due to {e.__class__.__name__}. Message: {e}'
) from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
commit_id: 6cd01c45d5e57d357a6b1c3495ec035e0610fd78 | ast_errors: (empty) | token_counts: 74 | url: https://github.com/freqtrade/freqtrade.git | n_whitespaces: 159
random_cut:
def get_market_leverage_tiers(self, symbol) -> List[Dict]:
try:
return self._api.fetch_market_leverage_tiers(symbol)
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
raise TemporaryError(
f'Could not load leverage tiers for {symbol}'
                f' due to {e.__class__.__name_
n_identifiers: 18 | n_ast_nodes: 134 | fun_name: get_market_leverage_tiers

n_words: 86 | n_ast_errors: 1 | complexity: 6 | nloc: 13 | path: tests/packaged_modules/test_audiofolder.py | id: 105,547
commit_message: Add support for CSV metadata files to ImageFolder (#4837)
* Add support for CSV metadata files to ImageFolder
* Add tests
* Update doc
* Add one more test
* Add identical tests for audiofolder
* Docs for audiofolder
* Address review comments
* Minor adjustments
repo: datasets | ast_levels: 15 | language: Python | vocab_size: 59 | file_name: test_audiofolder.py
code:
def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata):
data_files = data_files_with_two_splits_and_metadata
audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir)
audiofolder.download_and_prepare()
datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset()
for split, data_files in data_files.items():
expected_num_of_audios = len(data_files) - 1 # don't count the metadata file
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_audios
# make sure each sample has its own audio and metadata
assert len(set(example["audio"]["path"] for example in dataset)) == expected_num_of_audios
assert len(set(example["text"] for example in dataset)) == expected_num_of_audios
assert all(example["text"] is not None for example in dataset)
@require_sndfile
@pytest.mark.parametrize("streaming", [False, True]) | 7380140accf522a4363bb56c0b77a4190f49bed6 | @require_sndfile
@pytest.mark.parametrize("streaming", [False, True]) | 135 | https://github.com/huggingface/datasets.git | 155 | def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata):
data_files = data_files_with_two_splits_and_metadata
audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir)
audiofolder.download_and_prepare()
datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset()
for split, data_files in data_files.items():
        expected_num_of_audio
n_identifiers: 24 | n_ast_nodes: 242 | fun_name: test_data_files_with_metadata_and_multiple_splits

n_words: 49 | n_ast_errors: 0 | complexity: 1 | nloc: 7 | path: pandas/tests/scalar/interval/test_interval.py | id: 166,838
commit_message: ENH: consistency of input args for boundaries - Interval (#46522)
repo: pandas | ast_levels: 8 | language: Python | vocab_size: 32 | file_name: test_interval.py
code:
def test_is_empty(self, left, right, closed):
# GH27219
# non-empty always return False
iv = Interval(left, right, closed)
assert iv.is_empty is False
# same endpoint is empty except when inclusive='both' (contains one point)
iv = Interval(left, left, closed)
result = iv.is_empty
expected = closed != "both"
assert result is expected
commit_id: 7e23a37e1c5bda81234801a6584563e2880769eb | ast_errors: (empty) | token_counts: 51 | url: https://github.com/pandas-dev/pandas.git | n_whitespaces: 111
random_cut:
def test_is_empty(self, left, right, closed):
# GH27219
# non-empty always return False
iv = Interval(left, right, closed)
assert iv.is_empty is False
# same endpoint is empty except when inclusive='both' (contains one point)
iv = Interval(left, left, closed)
result = iv.is_empty
expected = closed != "both"
        assert
n_identifiers: 10 | n_ast_nodes: 79 | fun_name: test_is_empty

n_words: 376 | n_ast_errors: 0 | complexity: 1 | nloc: 147 | path: tests/components/websocket_api/test_commands.py | id: 293,521
commit_message: Websocket api to subscribe to entities (payloads reduced by ~80%+ vs state_changed events) (#67891)
repo: core | ast_levels: 18 | language: Python | vocab_size: 100 | file_name: test_commands.py
code:
async def test_subscribe_unsubscribe_entities(hass, websocket_client, hass_admin_user):
hass.states.async_set("light.permitted", "off", {"color": "red"})
original_state = hass.states.get("light.permitted")
assert isinstance(original_state, State)
state_dict = {
"attributes": dict(original_state.attributes),
"context": dict(original_state.context.as_dict()),
"entity_id": original_state.entity_id,
"last_changed": original_state.last_changed.isoformat(),
"last_updated": original_state.last_updated.isoformat(),
"state": original_state.state,
}
hass_admin_user.groups = []
hass_admin_user.mock_policy({"entities": {"entity_ids": {"light.permitted": True}}})
await websocket_client.send_json({"id": 7, "type": "subscribe_entities"})
msg = await websocket_client.receive_json()
assert msg["id"] == 7
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
msg = await websocket_client.receive_json()
assert msg["id"] == 7
assert msg["type"] == "event"
assert isinstance(msg["event"]["a"]["light.permitted"]["c"], str)
assert msg["event"] == {
"a": {
"light.permitted": {
"a": {"color": "red"},
"c": ANY,
"lc": ANY,
"s": "off",
}
}
}
hass.states.async_set("light.not_permitted", "on")
hass.states.async_set("light.permitted", "on", {"color": "blue"})
hass.states.async_set("light.permitted", "on", {"effect": "help"})
hass.states.async_set(
"light.permitted", "on", {"effect": "help", "color": ["blue", "green"]}
)
hass.states.async_remove("light.permitted")
hass.states.async_set("light.permitted", "on", {"effect": "help", "color": "blue"})
msg = await websocket_client.receive_json()
assert msg["id"] == 7
assert msg["type"] == "event"
assert msg["event"] == {
"c": {
"light.permitted": {
"+": {
"a": {"color": "blue"},
"c": ANY,
"lc": ANY,
"s": "on",
}
}
}
}
change_set = msg["event"]["c"]["light.permitted"]
additions = deepcopy(change_set["+"])
_apply_entities_changes(state_dict, change_set)
assert state_dict == {
"attributes": {"color": "blue"},
"context": {
"id": additions["c"],
"parent_id": None,
"user_id": None,
},
"entity_id": "light.permitted",
"last_changed": additions["lc"],
"last_updated": additions["lc"],
"state": "on",
}
msg = await websocket_client.receive_json()
assert msg["id"] == 7
assert msg["type"] == "event"
assert msg["event"] == {
"c": {
"light.permitted": {
"+": {
"a": {"effect": "help"},
"c": ANY,
"lu": ANY,
},
"-": {"a": ["color"]},
}
}
}
change_set = msg["event"]["c"]["light.permitted"]
additions = deepcopy(change_set["+"])
_apply_entities_changes(state_dict, change_set)
assert state_dict == {
"attributes": {"effect": "help"},
"context": {
"id": additions["c"],
"parent_id": None,
"user_id": None,
},
"entity_id": "light.permitted",
"last_changed": ANY,
"last_updated": additions["lu"],
"state": "on",
}
msg = await websocket_client.receive_json()
assert msg["id"] == 7
assert msg["type"] == "event"
assert msg["event"] == {
"c": {
"light.permitted": {
"+": {
"a": {"color": ["blue", "green"]},
"c": ANY,
"lu": ANY,
}
}
}
}
change_set = msg["event"]["c"]["light.permitted"]
additions = deepcopy(change_set["+"])
_apply_entities_changes(state_dict, change_set)
assert state_dict == {
"attributes": {"effect": "help", "color": ["blue", "green"]},
"context": {
"id": additions["c"],
"parent_id": None,
"user_id": None,
},
"entity_id": "light.permitted",
"last_changed": ANY,
"last_updated": additions["lu"],
"state": "on",
}
msg = await websocket_client.receive_json()
assert msg["id"] == 7
assert msg["type"] == "event"
assert msg["event"] == {"r": ["light.permitted"]}
msg = await websocket_client.receive_json()
assert msg["id"] == 7
assert msg["type"] == "event"
assert msg["event"] == {
"a": {
"light.permitted": {
"a": {"color": "blue", "effect": "help"},
"c": ANY,
"lc": ANY,
"s": "on",
}
}
}
commit_id: 0d8f649bd65c8c54cd3503dd75485d3ec35d6076 | ast_errors: (empty) | token_counts: 878 | url: https://github.com/home-assistant/core.git | n_whitespaces: 1,461
random_cut:
async def test_subscribe_unsubscribe_entities(hass, websocket_client, hass_admin_user):
hass.states.async_set("light.permitted", "off", {"color": "red"})
original_state = hass.states.get("light.permitted")
assert isinstance(original_state, State)
state_dict = {
"attributes": dict(original_state.attributes),
"context": dict(original_state.context.as_dict()),
"entity_id": original_state.entity_id,
"la | 34 | 1,636 | test_subscribe_unsubscribe_entities |
|
n_words: 42 | n_ast_errors: 1 | complexity: 1 | nloc: 8 | path: tests/sentry/relay/test_config.py | id: 87,296
commit_message: feat(dyn-sampling): Switch to new feature flag multiplexer in projectconfig (#40498)
This PR switch to new feature flag multiplexer
in projectconfig.
repo: sentry | ast_levels: 12 | language: Python | vocab_size: 38 | file_name: test_config.py
code:
def test_get_experimental_config(mock_sentry_sdk, _, default_project):
keys = ProjectKey.objects.filter(project=default_project)
with Feature(
{"organizations:dynamic-sampling": True, "organizations:server-side-sampling": True}
):
# Does not raise:
cfg = get_project_config(default_project, full_config=True, project_keys=keys)
# Key is missing from config:
assert "dynamicSampling" not in cfg.to_dict()["config"]
assert mock_sentry_sdk.capture_exception.call_args == mock.call(SOME_EXCEPTION)
@pytest.mark.django_db
@pytest.mark.parametrize("has_custom_filters", [False, True]) | c8bfd65f261769da2565ca4240f11da6e820a7e4 | @pytest.mark.django_db
@pytest.mark.parametrize("has_custom_filters", [False, True]) | 74 | https://github.com/getsentry/sentry.git | 78 | def test_get_experimental_config(mock_sentry_sdk, _, default_project):
keys = ProjectKey.objects.filter(project=default_project)
with Feature(
{"organizations:dynamic-sampling": True, "organizations:server-side-sampling": True}
):
# Does not raise:
n_identifiers: 24 | n_ast_nodes: 156 | fun_name: test_get_experimental_config

n_words: 21 | n_ast_errors: 0 | complexity: 1 | nloc: 7 | path: examples/addons/options-configure.py | id: 251,174
commit_message: use Python 3.9+ typing
repo: mitmproxy | ast_levels: 9 | language: Python | vocab_size: 18 | file_name: options-configure.py
code:
def load(self, loader):
loader.add_option(
name = "addheader",
typespec = Optional[int],
default = None,
help = "Add a header to responses",
)
| fdde9ba3b3caaa2654048cec0af07bfcc3a6a3f8 | 31 | https://github.com/mitmproxy/mitmproxy.git | 78 | def load(self, loader):
loader.add_option(
name = "addhe | 10 | 48 | load |
|
n_words: 6 | n_ast_errors: 1 | complexity: 1 | nloc: 2 | path: modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/partition.py | id: 155,175
commit_message: FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059)
Signed-off-by: Igoshev, Iaroslav <iaroslav.igoshev@intel.com>
repo: modin | ast_levels: 9 | language: Python | vocab_size: 6 | file_name: partition.py
code:
def _get_index_and_columns_size(df):
return len(df.index), len(df.columns)
@unidist.remote(num_returns=4)
commit_id: 193505fdf0c984743397ba3df56262f30aee13a8 | ast_errors: @unidist.remote(num_returns=4) | token_counts: 20 | url: https://github.com/modin-project/modin.git | n_whitespaces: 11
random_cut:
def _get_index_and_columns_size(df):
return len(df.index), len(df.columns)
@unidist.remote(num_returns=4)
n_identifiers: 8 | n_ast_nodes: 50 | fun_name: _get_index_and_columns_size

n_words: 96 | n_ast_errors: 0 | complexity: 3 | nloc: 49 | path: sandbox/will/basic.py | id: 184,172
commit_message: more docs
repo: textual | ast_levels: 14 | language: Python | vocab_size: 61 | file_name: basic.py
code:
def compose(self) -> ComposeResult:
table = DataTable()
self.scroll_to_target = Tweet(TweetBody())
yield Static(
Text.from_markup(
"[b]This is a [u]Textual[/u] app, running in the terminal"
),
id="header",
)
yield from (
Tweet(TweetBody()),
Widget(
Static(
Syntax(CODE, "python", line_numbers=True, indent_guides=True),
classes="code",
),
classes="scrollable",
),
table,
Error(),
Tweet(TweetBody(), classes="scrollbar-size-custom"),
Warning(),
Tweet(TweetBody(), classes="scroll-horizontal"),
Success(),
Tweet(TweetBody(), classes="scroll-horizontal"),
Tweet(TweetBody(), classes="scroll-horizontal"),
Tweet(TweetBody(), classes="scroll-horizontal"),
Tweet(TweetBody(), classes="scroll-horizontal"),
Tweet(TweetBody(), classes="scroll-horizontal"),
)
yield Widget(id="footer")
yield Widget(
Widget(classes="title"),
Widget(classes="user"),
OptionItem(),
OptionItem(),
OptionItem(),
Widget(classes="content"),
id="sidebar",
)
table.add_column("Foo", width=20)
table.add_column("Bar", width=20)
table.add_column("Baz", width=20)
table.add_column("Foo", width=20)
table.add_column("Bar", width=20)
table.add_column("Baz", width=20)
table.zebra_stripes = True
for n in range(100):
table.add_row(*[f"Cell ([b]{n}[/b], {col})" for col in range(6)])
commit_id: fa4b971bffb9f488d155981794b7f0bf3b657c72 | ast_errors: (empty) | token_counts: 308 | url: https://github.com/Textualize/textual.git | n_whitespaces: 587
random_cut:
def compose(self) -> ComposeResult:
table = DataTable()
self.scroll_to_target = Tweet(TweetBody())
yield Static(
Text.from_markup(
"[b]This is a [u]Textual[/u] app, running in the terminal"
),
id="header",
)
yield from (
Tweet(TweetBody()),
Widget(
Static(
Syntax(CODE, "python", line_numbers=True, indent_guides=True),
classes="code",
),
classes="scrollable",
),
table,
Error(),
Tweet(TweetBody(), classes="scrollbar-size-custom"),
Warning(),
Tweet(TweetBody(), classes="scroll-horizontal"),
Success(),
Tweet(TweetBody(), classes="scroll-horizontal"),
Tweet(TweetBody(), classes="scroll-horizontal"),
Tweet(TweetBody(), classes="scroll-horizontal"),
Tweet(TweetBody(), classes="scroll-horizontal"),
Tweet(TweetBody(), classes="scroll-horizontal"),
)
yield Widget(id="footer")
yield Widget(
Widget(classes="title"),
Widget(classes="user"),
OptionItem(),
OptionItem(),
OptionItem(),
Widget(classes="content"),
id="sidebar",
)
table.add_column("Foo", width=20)
table.add_column("Bar", width=20)
    table.add
n_identifiers: 29 | n_ast_nodes: 516 | fun_name: compose

n_words: 18 | n_ast_errors: 0 | complexity: 2 | nloc: 19 | path: homeassistant/components/sia/sia_entity_base.py | id: 291,940
commit_message: Add Connectivity sensor to SIA (#64305)
* implemented connectivity sensor
* further cleanup off update code
* cleanup and tighter behaviour for attributes
* added seperate connectivity class to binary sensor
* callbacks and keys
* redid name and unique_id logic, non-breaking result
* using entry more in inits
* Fix import
* fix ping_interval in sia_entity_base
* added ping_interval default to next
* fixed next
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
repo: core | ast_levels: 12 | language: Python | vocab_size: 17 | file_name: sia_entity_base.py
code:
async def async_added_to_hass(self) -> None:
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIA_EVENT.format(self.port, self.account),
self.async_handle_event,
)
)
self.handle_last_state(await self.async_get_last_state())
if self._attr_available:
self.async_create_post_interval_update_cb()
| af4e37339a39badd5596e8bc9ba86d6c1994aa1b | 58 | https://github.com/home-assistant/core.git | 131 | async def async_added_to_hass(self) -> None:
self.async_on_remove(
async_dispatcher_connect(
self.hass,
| 14 | 95 | async_added_to_hass |
|
31 | 0 | 3 | 23 | keras/saving/saved_model/saved_model_test.py | 278,345 | resolve line-too-long in saving | keras | 9 | Python | 27 | saved_model_test.py | def test_custom_metric_model(self):
# TODO(b/134519980): Issue with `model.fit` if the model call function
# uses a `tf.function` in graph mode.
if not tf.executing_eagerly():
return
x = np.random.random((1, 3))
y = np.random.random((1, 4))
| b0ffc0031e9c1964e7398ca47c6666bbfc0d5086 | 180 | https://github.com/keras-team/keras.git | 76 | def test_custom_metric_model(self):
# TODO(b/134519980): Issue with `model.fit` if the model call function
# uses a `tf.function` in graph mode.
| 8 | 69 | test_custom_metric_model |
|
50 | 0 | 2 | 21 | keras/saving/save_weights_test.py | 275,973 | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | keras | 12 | Python | 34 | save_weights_test.py | def test_sequential_weight_loading(self):
if h5py is None:
return
h5_path = self._save_model_dir("test.h5")
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
x = np.random.random((batch_size, input_dim))
ref_y = model.predict(x)
model.save_weights(h5_path)
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(y, ref_y)
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | ast_errors: (empty) | token_counts: 166 | url: https://github.com/keras-team/keras.git | n_whitespaces: 241
random_cut:
def test_sequential_weight_loading(self):
if h5py is None:
n_identifiers: 26 | n_ast_nodes: 271 | fun_name: test_sequential_weight_loading

n_words: 121 | n_ast_errors: 1 | complexity: 15 | nloc: 35 | path: sympy/series/gruntz.py | id: 196,818
commit_message: Reordered imports 2
repo: sympy | ast_levels: 13 | language: Python | vocab_size: 73 | file_name: gruntz.py
code:
def sign(e, x):
if not isinstance(e, Basic):
raise TypeError("e should be an instance of Basic")
if e.is_positive:
return 1
elif e.is_negative:
return -1
elif e.is_zero:
return 0
elif not e.has(x):
from sympy.simplify import logcombine
e = logcombine(e)
return _sign(e)
elif e == x:
return 1
elif e.is_Mul:
a, b = e.as_two_terms()
sa = sign(a, x)
if not sa:
return 0
return sa * sign(b, x)
elif isinstance(e, exp):
return 1
elif e.is_Pow:
if e.base == S.Exp1:
return 1
s = sign(e.base, x)
if s == 1:
return 1
if e.exp.is_Integer:
return s**e.exp
elif isinstance(e, log):
return sign(e.args[0] - 1, x)
# if all else fails, do it the hard way
c0, e0 = mrv_leadterm(e, x)
return sign(c0, x)
@debug
@timeit
@cacheit
commit_id: f757f3daae6e11ea0cfb7dadc133274d8d74315f | token_counts: 209 | url: https://github.com/sympy/sympy.git | n_whitespaces: 330
ast_errors:
@debug
@timeit
@cacheit
random_cut:
def sign(e, x):
if not isinstance(e, Basic):
raise TypeError("e should be an instance of Basic")
n_identifiers: 34 | n_ast_nodes: 339 | fun_name: sign

n_words: 22 | n_ast_errors: 1 | complexity: 1 | nloc: 14 | path: tests/sentry/tasks/test_relay.py | id: 90,464
commit_message: feat(proj-config): Add v3 of proj config endpoint (#34746)
This new version of the endpoint doesn't perform any computation on the
project configs. Instead, it performs computation async, as follows:
- If a requested project config exists in the cache, the endpoint
returns it.
- If a requested project config doesn't exist in the cache, the endpoint
schedules a new task to compute that config, and returns a pending
response.
- If a requested project config doesn't exist in the cache, but a task
is already scheduled, the endpoint returns a pending request.
Tasks are debounced based on the public key of the project. Pending
projects are returned as a list part of the config in the response, for
example:
```
{
proj1_key: { proj1_config },
proj2_key: { proj2_config },
pending: [proj3_key, proj4_key]
}
```
The `pending` entry only exists if there is at least one pending
project.
**Redis cache changes**
Redis is now a requirement for the project configs cache, since the endpoint can't operate without redis anymore. On the other hand, the debouncing cache hasn't been updated because it's not needed for local development (requests will never be debounced and always processed).
repo: sentry | ast_levels: 8 | language: Python | vocab_size: 19 | file_name: test_relay.py
code:
def redis_cache(monkeypatch):
monkeypatch.setattr(
"django.conf.settings.SENTRY_RELAY_PROJECTCONFIG_CACHE",
"sentry.relay.projectconfig_cache.redis.RedisProjectConfigCache",
)
cache = RedisProjectConfigCache()
monkeypatch.setattr("sentry.relay.projectconfig_cache.set_many", cache.set_many)
monkeypatch.setattr("sentry.relay.projectconfig_cache.delete_many", cache.delete_many)
monkeypatch.setattr("sentry.relay.projectconfig_cache.get", cache.get)
monkeypatch.setattr(
"django.conf.settings.SENTRY_RELAY_PROJECTCONFIG_DEBOUNCE_CACHE",
"sentry.relay.projectconfig_debounce_cache.redis.RedisProjectConfigDebounceCache",
)
return cache
@pytest.fixture
commit_id: d9e850f2723a9d4919b0038d2f6cb59321eef295 | ast_errors: @pytest.fixture | token_counts: 60 | url: https://github.com/getsentry/sentry.git | n_whitespaces: 75
random_cut:
def redis_cache(monkeypatch):
    monkeypatch.set
n_identifiers: 10 | n_ast_nodes: 113 | fun_name: redis_cache

n_words: 21 | n_ast_errors: 0 | complexity: 2 | nloc: 6 | path: fastai/metrics.py | id: 190,387
commit_message: Upgrading to support latest Pytorch version
repo: DeOldify | ast_levels: 13 | language: Python | vocab_size: 18 | file_name: metrics.py
code:
def _precision(self):
prec = torch.diag(self.cm) / self.cm.sum(dim=0)
if self.average is None: return prec
else:
weights = self._weights(avg=self.average)
return (prec * weights).sum()
commit_id: 4fc3616712edb19179b17dd270ad6cf63abf99c2 | ast_errors: (empty) | token_counts: 59 | url: https://github.com/jantic/DeOldify.git | n_whitespaces: 63
random_cut:
def _precision(self):
prec = torch.diag(self.cm) / self.cm.sum(dim=0)
if self.average is None: return prec
else:
        weights = self._w
n_identifiers: 12 | n_ast_nodes: 95 | fun_name: _precision

n_words: 10 | n_ast_errors: 0 | complexity: 1 | nloc: 3 | path: python/ray/experimental/state/state_manager.py | id: 141,120
commit_message: [State Observability] Ray log alpha API (#24964)
This is the PR to implement ray log to the server side. The PR is continued from #24068.
The PR supports two endpoints;
/api/v0/logs # list logs of the node id filtered by the given glob.
/api/v0/logs/{[file | stream]}?filename&pid&actor_id&task_id&interval&lines # Stream the requested file log. The filename can be inferred by pid/actor_id/task_id
Some tests need to be re-written, I will do it soon.
As a follow-up after this PR, there will be 2 PRs.
PR to add actual CLI
PR to remove in-memory cached logs and do on-demand query for actor/worker logs | ray | 9 | Python | 10 | state_manager.py | def get_all_registered_agent_ids(self) -> List[str]:
assert len(self._log_agent_stub) == len(self._runtime_env_agent_stub)
return self._runtime_env_agent_stub.keys()
| 00e3fd75f33d762387ab6fa379743cd21c409ea6 | 32 | https://github.com/ray-project/ray.git | 23 | def get_all_registered_agent_ids(self) -> List[str]:
assert len(self._log_agent_stub) == len(self._runtime_env_agent_stub)
return self._runtim | 8 | 51 | get_all_registered_agent_ids |
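
The two endpoints the commit message describes could be exercised roughly as follows; the base URL and the exact query-parameter names (`glob` in particular) are assumptions drawn from the message, not a final CLI:

```python
import requests

# Hypothetical usage of the log endpoints described above; parameter
# names follow the commit message and should be treated as assumptions.
base = "http://127.0.0.1:8265"

# List log files of a node, filtered by a glob.
listing = requests.get(f"{base}/api/v0/logs", params={"glob": "*.log"})

# Stream the last 100 lines of a worker log, resolved from its pid.
tail = requests.get(f"{base}/api/v0/logs/file",
                    params={"pid": 1234, "lines": 100})
```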
|
130 | 1 | 2 | 72 | networkx/linalg/laplacianmatrix.py | 176,191 | Use scipy.sparse array datastructure (#5139)
* Step 1: use sparse arrays in nx.to_scipy_sparse_matrix.
Seems like a reasonable place to start.
nx.to_scipy_sparse_matrix is one of the primary interfaces to
scipy.sparse from within NetworkX.
* 1: Use np.outer instead of mult col/row vectors
Fix two instances in modularitymatrix where a new 2D array was being
created via an outer product of two \"vectors\".
In the matrix case, this was a row vector \* a column vector. In the
array case this can be disambiguated by being explicit with np.outer.
* Update _transition_matrix in laplacianmatrix module
- A few instances of matrix multiplication operator
- Add np.newaxis + transpose to get shape right for broadcasting
- Explicitly convert e.g. sp.sparse.spdiags to a csr_array.
* Update directed_combinitorial_laplacian w/ sparse array.
- Wrap spdiags in csr_array and update matmul operators.
* Rm matrix-specific code from lgc and hmn modules
- Replace .A call with appropriate array semantics
- wrap sparse.diags in csr_array.
* Change hits to use sparse array semantics.
- Replace * with @
- Remove superfluous calls to flatten.
* Update sparse matrix usage in layout module.
- Simplify lil.getrowview call
- Wrap spdiags in csr_array.
* lil_matrix -> lil_array in graphmatrix.py.
* WIP: Start working on algebraic connectivity module.
* Incorporate auth mat varname feedback.
* Revert 1D slice and comment for 1D sparse future.
* Add TODOs: rm csr_array wrapper around spdiags etc.
* WIP: cleanup algebraicconn: tracemin_fiedler.
* Typo.
* Finish reviewing algebraicconnectivity.
* Convert bethe_hessian matrix to use sparse arrays.
* WIP: update laplacian.
Update undirected laplacian functions.
* WIP: laplacian - add comment about _transition_matrix return types.
* Finish laplacianmatrix review.
* Update attrmatrix.
* Switch to official laplacian function.
* Update pagerank to use sparse array.
* Switch bipartite matrix to sparse arrays.
* Check from_scipy_sparse_matrix works with arrays.
Modifies test suite.
* Apply changes from review.
* Fix failing docstring tests.
* Fix missing axis for in-place multiplication.
* Use scipy==1.8rc2
* Use matrix multiplication
* Fix PyPy CI
* [MRG] Create plot_subgraphs.py example (#5165)
* Create plot_subgraphs.py
https://github.com/networkx/networkx/issues/4220
* Update plot_subgraphs.py
black
* Update plot_subgraphs.py
lint plus font_size
* Update plot_subgraphs.py
added more plots
* Update plot_subgraphs.py
removed plots from the unit test and added comments
* Update plot_subgraphs.py
lint
* Update plot_subgraphs.py
typos fixed
* Update plot_subgraphs.py
added nodes to the plot of the edges removed that was commented out for whatever reason
* Update plot_subgraphs.py
revert the latest commit - the line was commented out for a reason - it's broken
* Update plot_subgraphs.py
fixed node color issue
* Update plot_subgraphs.py
format fix
* Update plot_subgraphs.py
forgot to draw the nodes... now fixed
* Fix sphinx warnings about heading length.
* Update examples/algorithms/plot_subgraphs.py
* Update examples/algorithms/plot_subgraphs.py
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu>
Co-authored-by: Dan Schult <dschult@colgate.edu>
* Add traveling salesman problem to example gallery (#4874)
Adds an example of using Christofides to solve the TSP problem to the example gallery.
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu>
* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() (#5037)
* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges()
* Resolved Requested Changes
* Revert changes to degree docstrings.
* Update comments in example.
* Apply wording to edges method in all graph classes.
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu>
* Compatibility updates from testing with numpy/scipy/pytest rc's (#5226)
* Rm deprecated scipy subpkg access.
* Use recwarn fixture in place of deprecated pytest pattern.
* Rm unnecessary try/except from tests.
* Replace internal `close` fn with `math.isclose`. (#5224)
* Replace internal close fn with math.isclose.
* Fix lines in docstring examples.
* Fix Python 3.10 deprecation warning w/ int div. (#5231)
* Touchups and suggestions for subgraph gallery example (#5225)
* Simplify construction of G with edges rm'd
* Rm unused graph attribute.
* Shorten categorization by node type.
* Simplify node coloring.
* Simplify isomorphism check.
* Rm unit test.
* Rm redundant plotting of each subgraph.
* Use new package name (#5234)
* Allowing None edges in weight function of bidirectional Dijkstra (#5232)
* added the following feature to bidirectional dijkstra as well: the weight function can be used to hide edges by returning None.
* changed syntax for better readability and code duplicate avoidance
Co-authored-by: Hohmann, Nikolas <nikolas.hohmann@tu-darmstadt.de>
* Add an FAQ about assigning issues. (#5182)
* Add FAQ about assigning issues.
* Add note about linking issues from new PRs.
* Update dev deps (#5243)
* Update minor doc issues with tex notation (#5244)
* Add FutureWarnings to fns that return sparse matrices
- biadjacency_matrix.
- bethe_hessian_matrix.
- incidence_matrix.
- laplacian functions.
- modularity_matrix functions.
- adjacency_matrix.
* Add to_scipy_sparse_array and use it everywhere.
Add a new conversion function to preserve array semantics internally
while not altering behavior for users.
Also adds FutureWarning to to_scipy_sparse_matrix.
* Add from_scipy_sparse_array. Supercedes from_scipy_sparse_matrix.
* Handle deprecations in separate PR.
* Fix docstring examples.
Co-authored-by: Mridul Seth <mail@mriduls.com>
Co-authored-by: Jarrod Millman <jarrod.millman@gmail.com>
Co-authored-by: Andrew Knyazev <andrew.knyazev@ucdenver.edu>
Co-authored-by: Dan Schult <dschult@colgate.edu>
Co-authored-by: eskountis <56514439+eskountis@users.noreply.github.com>
Co-authored-by: Anutosh Bhat <87052487+anutosh491@users.noreply.github.com>
Co-authored-by: NikHoh <nikhoh@web.de>
Co-authored-by: Hohmann, Nikolas <nikolas.hohmann@tu-darmstadt.de>
Co-authored-by: Sultan Orazbayev <contact@econpoint.com>
Co-authored-by: Mridul Seth <mail@mriduls.com> | networkx | 12 | Python | 88 | laplacianmatrix.py | def normalized_laplacian_matrix(G, nodelist=None, weight="weight"):
r
import numpy as np
import scipy as sp
import scipy.sparse # call as sp.sparse
if nodelist is None:
nodelist = list(G)
A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr")
n, m = A.shape
diags = A.sum(axis=1)
# TODO: rm csr_array wrapper when spdiags can produce arrays
D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, m, n, format="csr"))
L = D - A
with sp.errstate(divide="ignore"):
diags_sqrt = 1.0 / np.sqrt(diags)
diags_sqrt[np.isinf(diags_sqrt)] = 0
# TODO: rm csr_array wrapper when spdiags can produce arrays
DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format="csr"))
import warnings
warnings.warn(
"normalized_laplacian_matrix will return a scipy.sparse array instead of a matrix in Networkx 3.0.",
FutureWarning,
stacklevel=2,
)
# TODO: rm csr_matrix wrapper for NX 3.0
return sp.sparse.csr_matrix(DH @ (L @ DH))
###############################################################################
# Code based on
# https://bitbucket.org/bedwards/networkx-community/src/370bd69fc02f/networkx/algorithms/community/
@not_implemented_for("undirected")
@not_implemented_for("multigraph") | 5dfd57af2a141a013ae3753e160180b82bec9469 | @not_implemented_for("undirected")
@not_implemented_for("multigraph") | 197 | https://github.com/networkx/networkx.git | 220 | def normalized_laplacian_matrix(G, nodelist=None, weight="weight"):
r
import numpy as np
import scipy as sp
import scipy.sparse # call as sp.sparse
if nodelist is None:
nodelist = list(G)
A = | 36 | 336 | normalized_laplacian_matrix |
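
The core semantic shift behind the long changelog above is that `scipy.sparse` *arrays* follow NumPy conventions — `*` is elementwise and `@` is the matrix product — where the old sparse matrix classes overloaded `*` for matmul. A minimal illustration (requires scipy >= 1.8, which the changelog pins):

```python
import numpy as np
import scipy.sparse as sp

A = sp.csr_array(np.eye(2) * 2)       # sparse *array*: NumPy semantics
B = sp.csr_array([[1, 2], [3, 4]])

elementwise = (A * B).toarray()       # elementwise, like ndarrays
matmul = (A @ B).toarray()            # matrix product, as used in the patch

print(elementwise)  # [[2. 0.], [0. 8.]]
print(matmul)       # [[2. 4.], [6. 8.]]
```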
13 | 1 | 1 | 3 | keras/preprocessing/image.py | 268,947 | Copy image utils from keras_preprocessing directly into core keras
This is not new code, we are just moving these utilities directly
into keras from keras-preprocessing.
For the library code, just fixed linting errors.
For the test code, had to do more major changes to port from pytest, but
hopefully any errors have been caught by the tests themselves.
PiperOrigin-RevId: 427274651 | keras | 9 | Python | 13 | image.py | def random_channel_shift(x, intensity_range, channel_axis=0):
intensity = np.random.uniform(-intensity_range, intensity_range)
return apply_channel_shift(x, intensity, channel_axis=channel_axis)
@keras_export('keras.preprocessing.image.apply_brightness_shift') | 373ad97c72ed1ac4b6898e85b2cfd7b016e4b469 | @keras_export('keras.preprocessing.image.apply_brightness_shift') | 36 | https://github.com/keras-team/keras.git | 15 | def random_channel_shift(x, intensity_range, channel_axis=0):
intensi | 10 | 66 | random_channel_shift |
134 | 0 | 5 | 46 | erpnext/patches/v14_0/migrate_remarks_from_gl_to_payment_ledger.py | 69,326 | refactor: remove duplicate entries on remarks migration patch | erpnext | 25 | Python | 85 | migrate_remarks_from_gl_to_payment_ledger.py | def execute():
if frappe.reload_doc("accounts", "doctype", "payment_ledger_entry"):
gle = qb.DocType("GL Entry")
ple = qb.DocType("Payment Ledger Entry")
# get ple and their remarks from GL Entry
pl_entries = (
qb.from_(ple)
.left_join(gle)
.on(
(ple.account == gle.account)
& (ple.party_type == gle.party_type)
& (ple.party == gle.party)
& (ple.voucher_type == gle.voucher_type)
& (ple.voucher_no == gle.voucher_no)
& (ple.company == gle.company)
)
.select(
ple.company,
ple.account,
ple.party_type,
ple.party,
ple.voucher_type,
ple.voucher_no,
gle.remarks.as_("gle_remarks"),
)
.where((ple.delinked == 0) & (gle.is_cancelled == 0))
.run(as_dict=True)
)
pl_entries = remove_duplicate_entries(pl_entries)
if pl_entries:
# split into multiple batches, update and commit for each batch
batch_size = 1000
for batch in create_batch(pl_entries, batch_size):
for entry in batch:
query = (
qb.update(ple)
.set(ple.remarks, entry.gle_remarks)
.where(
(ple.company == entry.company)
& (ple.account == entry.account)
& (ple.party_type == entry.party_type)
& (ple.party == entry.party)
& (ple.voucher_type == entry.voucher_type)
& (ple.voucher_no == entry.voucher_no)
)
)
query.run()
frappe.db.commit()
| 518ab93e039d68827506c3ac92db3c09aea644e3 | 296 | https://github.com/frappe/erpnext.git | 86 | def execute():
if frappe.reload_doc("accounts", "doctype", "payment_ledger_entry"):
gle = qb.DocType("GL Entry")
ple = qb.DocType("Payment Ledger Entry")
# get ple and their remarks from GL Entry
pl_entries = (
qb.from_(ple)
.left_join(gle)
.on(
(ple.account == gle.account)
& (ple.party_type == gle.party_type)
& (ple.party == gle.party)
& (ple.voucher_type == gle.voucher_type)
& (ple.voucher_no == gle.voucher_no)
& (ple.company == gle.company)
)
.select(
ple.company,
ple.account,
ple.party_type,
ple.party,
ple.voucher_type,
ple.voucher_no,
gle.remarks.as_("gle_remarks"),
)
.where((ple.delinked == 0) & (gle.is_cancelled == 0))
.run(as_dict=True)
)
pl_entries = remove_duplicate_entries(pl_entries)
if pl_entries:
# split into multiple batches, update and commit for each batch
batch_size = 1000
for batch in create_batch(pl_entries, batch_size):
for entry in batch:
query = (
qb.update(ple)
.set(ple.remarks, entry.gle_remarks)
.where(
(ple.company == entry.company)
& (ple.account == entry.account)
& (ple.party_type == entry.party_type)
& (ple.party == entry.party)
& (ple.voucher_type == entry.vouche | 36 | 463 | execute |
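
The patch relies on frappe's `create_batch` to chunk the rows before updating and committing per batch. A generic equivalent looks like this (a standalone sketch, not frappe's implementation):

```python
def create_batch(items, batch_size):
    """Yield successive batch_size-sized chunks from a list."""
    for i in range(0, len(items), batch_size):
        yield items[i:i + batch_size]


rows = list(range(2500))
for batch in create_batch(rows, 1000):
    # update + commit per batch, as the migration above does
    print(len(batch))  # 1000, 1000, 500
```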
|
13 | 0 | 1 | 5 | terminal.py | 281,666 | Remember Contexts (#1187)
* Refacotred classes
* Handling for new instance desired
* Added feature flag
* Converted all menu calls | OpenBBTerminal | 9 | Python | 13 | terminal.py | def call_funds(self, _):
from gamestonk_terminal.mutual_funds.mutual_fund_controller import (
FundController,
)
self.queue = self.load_class(FundController, self.queue)
| 9e671aeba98dacc69ecbbfec1f087aca3b139ee7 | 33 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 52 | def call_funds(self, _):
from ga | 9 | 50 | call_funds |
|
25 | 0 | 2 | 8 | homeassistant/components/elgato/light.py | 313,861 | Improve Elgato error handling (#73444) | core | 11 | Python | 24 | light.py | async def async_identify(self) -> None:
try:
await self.client.identify()
except ElgatoError as error:
raise HomeAssistantError(
"An error occurred while identifying the Elgato Light"
) from error
| 05d7d31dfd4d4eb4a8b10a84319d7f2778e65298 | 30 | https://github.com/home-assistant/core.git | 94 | async def async_identify(self) -> None:
try:
await self.client.ident | 7 | 56 | async_identify |
|
86 | 0 | 1 | 42 | mypy/test/teststubtest.py | 123,863 | stubtest: fix literal type construction (#11931)
Co-authored-by: hauntsaninja <> | mypy | 10 | Python | 56 | teststubtest.py | def test_bad_literal(self) -> Iterator[Case]:
yield Case("from typing_extensions import Literal", "", None) # dummy case
yield Case(
stub="INT_FLOAT_MISMATCH: Literal[1]",
runtime="INT_FLOAT_MISMATCH = 1.0",
error="INT_FLOAT_MISMATCH",
)
yield Case(
stub="WRONG_INT: Literal[1]",
runtime="WRONG_INT = 2",
error="WRONG_INT",
)
yield Case(
stub="WRONG_STR: Literal['a']",
runtime="WRONG_STR = 'b'",
error="WRONG_STR",
)
yield Case(
stub="BYTES_STR_MISMATCH: Literal[b'value']",
runtime="BYTES_STR_MISMATCH = 'value'",
error="BYTES_STR_MISMATCH",
)
yield Case(
stub="STR_BYTES_MISMATCH: Literal['value']",
runtime="STR_BYTES_MISMATCH = b'value'",
error="STR_BYTES_MISMATCH",
)
yield Case(
stub="WRONG_BYTES: Literal[b'abc']",
runtime="WRONG_BYTES = b'xyz'",
error="WRONG_BYTES",
)
yield Case(
stub="WRONG_BOOL_1: Literal[True]",
runtime="WRONG_BOOL_1 = False",
error='WRONG_BOOL_1',
)
yield Case(
stub="WRONG_BOOL_2: Literal[False]",
runtime="WRONG_BOOL_2 = True",
error='WRONG_BOOL_2',
)
| e40877d4306787acb15985888e1f33ad4bdd9912 | 147 | https://github.com/python/mypy.git | 469 | def test_bad_literal(self) -> Iterator[Case]:
yield Case("from typing_extensions import Literal", "", None) # dummy case
yield Case(
stub="INT_FLOAT_MISMATCH: Literal[1]",
runtime="INT_FLOAT_MISMATCH = 1.0",
error="INT_FLOAT_MISMATCH",
)
yield Case(
stub="WRONG_INT: Literal[1]",
runtime="WRONG_INT = 2",
error="WRONG_INT",
)
yield Case(
stub="WRONG_STR: Literal['a']",
runtime="WRONG_STR = 'b'",
error="WRONG_STR",
)
yield Case(
stub="BYTES_STR_MISMATCH: Literal[b'value']",
runtime="BYTES_STR_MISMATCH = 'value'",
error="BYTES_STR_MISMA | 7 | 266 | test_bad_literal |
|
30 | 0 | 7 | 6 | erpnext/accounts/doctype/pricing_rule/utils.py | 69,657 | fix: Reapply pricing rule on qty change | erpnext | 15 | Python | 22 | utils.py | def apply_pricing_rule_for_free_items(doc, pricing_rule_args):
if pricing_rule_args:
items = tuple((d.item_code, d.pricing_rules) for d in doc.items if d.is_free_item)
for args in pricing_rule_args:
if not items or (args.get("item_code"), args.get("pricing_rules")) not in items:
doc.append("items", args)
| b741ae143c514cf832a435db1902986a915efde4 | 70 | https://github.com/frappe/erpnext.git | 24 | def apply_pricing_rule_for_free_items(doc, pricing_rule_args):
if pricing_rule_args:
items = tuple((d.item_code, d.pricing_rules) for d in doc.items if d.is_free_item)
for args in pricing_rule_args:
if not items or (args.get("item_code"), args.get("pricing_rules")) not in items:
do | 12 | 111 | apply_pricing_rule_for_free_items |
|
21 | 0 | 1 | 6 | test_tipc/supplementary/config.py | 22,998 | add supplementary | PaddleOCR | 10 | Python | 19 | config.py | def parse_args(self, argv=None):
args = super(ArgsParser, self).parse_args(argv)
assert args.config is not None, \
"Please specify --config=configure_file_path."
args.opt = self._parse_opt(args.opt)
return args
| 11f6ff38dcc61348aa4aae8ad2fbbe42b0eab34d | 46 | https://github.com/PaddlePaddle/PaddleOCR.git | 59 | def parse_args(self, argv=None):
args = super(ArgsParser, self).parse_args(argv)
assert args.config is not None, \
"Please specify --config=configure_file_path."
args.opt = self._parse_opt(args.opt)
return args
| 9 | 70 | parse_args |
|
311 | 1 | 27 | 69 | test/lib/ansible_test/_internal/commands/sanity/import.py | 266,760 | ansible-test - Code cleanup and refactoring. (#77169)
* Remove unnecessary PyCharm ignores.
* Ignore intentional undefined attribute usage.
* Add missing type hints. Fix existing type hints.
* Fix docstrings and comments.
* Use function to register completion handler.
* Pass strings to display functions.
* Fix CompositeAction handling of dest argument.
* Use consistent types in expressions/assignments.
* Use custom function to keep linters happy.
* Add missing raise for custom exception.
* Clean up key/value type handling in cloud plugins.
* Use dataclass instead of dict for results.
* Add custom type_guard function to check lists.
* Ignore return type that can't be checked (yet).
* Avoid changing types on local variables. | ansible | 20 | Python | 209 | import.py | def test(self, args, targets, python): # type: (SanityConfig, SanityTargets, PythonConfig) -> TestResult
settings = self.load_processor(args, python.version)
paths = [target.path for target in targets.include]
if python.version.startswith('2.') and (get_virtualenv_version(args, python.path) or (0,)) < (13,):
# hack to make sure that virtualenv is available under Python 2.x
# on Python 3.x we can use the built-in venv
# version 13+ is required to use the `--no-wheel` option
try:
install_requirements(args, python, virtualenv=True, controller=False) # sanity (import)
except PipUnavailableError as ex:
display.warning(str(ex))
temp_root = os.path.join(ResultType.TMP.path, 'sanity', 'import')
messages = []
for import_type, test in (
('module', _get_module_test(True)),
('plugin', _get_module_test(False)),
):
if import_type == 'plugin' and python.version in REMOTE_ONLY_PYTHON_VERSIONS:
continue
data = '\n'.join([path for path in paths if test(path)])
if not data and not args.prime_venvs:
continue
virtualenv_python = create_sanity_virtualenv(args, python, f'{self.name}.{import_type}', coverage=args.coverage, minimize=True)
if not virtualenv_python:
display.warning(f'Skipping sanity test "{self.name}" on Python {python.version} due to missing virtual environment support.')
return SanitySkipped(self.name, python.version)
virtualenv_yaml = check_sanity_virtualenv_yaml(virtualenv_python)
if virtualenv_yaml is False:
display.warning(f'Sanity test "{self.name}" ({import_type}) on Python {python.version} may be slow due to missing libyaml support in PyYAML.')
env = ansible_environment(args, color=False)
env.update(
SANITY_TEMP_PATH=ResultType.TMP.path,
SANITY_IMPORTER_TYPE=import_type,
)
if data_context().content.collection:
external_python = create_sanity_virtualenv(args, args.controller_python, self.name)
env.update(
SANITY_COLLECTION_FULL_NAME=data_context().content.collection.full_name,
SANITY_EXTERNAL_PYTHON=external_python.path,
SANITY_YAML_TO_JSON=os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'yaml_to_json.py'),
ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION=CONTROLLER_MIN_PYTHON_VERSION,
PYTHONPATH=':'.join((get_ansible_test_python_path(), env["PYTHONPATH"])),
)
if args.prime_venvs:
continue
display.info(import_type + ': ' + data, verbosity=4)
cmd = ['importer.py']
# add the importer to the path so it can be accessed through the coverage injector
env.update(
PATH=os.pathsep.join([os.path.join(TARGET_SANITY_ROOT, 'import'), env['PATH']]),
)
try:
stdout, stderr = cover_python(args, virtualenv_python, cmd, self.name, env, capture=True, data=data)
if stdout or stderr:
raise SubprocessError(cmd, stdout=stdout, stderr=stderr)
except SubprocessError as ex:
if ex.status != 10 or ex.stderr or not ex.stdout:
raise
pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
parsed = parse_to_list_of_dict(pattern, ex.stdout)
relative_temp_root = os.path.relpath(temp_root, data_context().content.root) + os.path.sep
messages += [SanityMessage(
message=r['message'],
path=os.path.relpath(r['path'], relative_temp_root) if r['path'].startswith(relative_temp_root) else r['path'],
line=int(r['line']),
column=int(r['column']),
) for r in parsed]
if args.prime_venvs:
return SanitySkipped(self.name, python_version=python.version)
results = settings.process_errors(messages, paths)
if results:
return SanityFailure(self.name, messages=results, python_version=python.version)
return SanitySuccess(self.name, python_version=python.version)
@cache | a06fa496d3f837cca3c437ab6e9858525633d147 | @cache | 648 | https://github.com/ansible/ansible.git | 1,243 | def test(self, args, targets, python): # type: (SanityConfig, SanityTargets, PythonConfig) -> TestResult
settings = self.load_processor(args, python.version)
paths = [target.path for target in targets.include]
if python.version.startswith('2.') and (get_virtualenv_version(args, python.path) or (0,)) < (13,):
# hack to make sure that virtualenv is available under Python 2.x
# on Python 3.x we can use the built-in venv
# version 13+ is required to use the `--no-wheel` option
try:
install_requirements(args, python, virtualenv=True, controller=False) # sanity (import)
except PipUnavailableError as ex:
display.warning(str(ex))
temp_root = os.path.join(ResultType.TMP.path, 'sanity', 'import')
messages = []
for import_type, test in (
('module', _get_module_test(True)),
('plugin', _get_module_test(False)),
):
if import_type == 'plugin' and python.version in REMOTE_ONLY_PYTHON_VERSIONS:
continue
data = '\n'.join([path for path in paths if test(path)])
if not data and not args.prime_venvs:
continue
virtualenv_python = create_sanity_virtualenv(args, python, f'{self.name}.{import_type}', coverage=args.coverage, minimize=True)
if not virtualenv_python:
display.warning(f'Skipping sanity test "{self.name}" on Python {python.version} due to missing virtual environment support.')
return SanitySkipped(self.name, python.version)
virtualenv_yaml = check_sanity_virtualenv_yaml(virtualenv_python)
if virtualenv_yaml is False:
display.warning(f'Sanity test "{self.name}" ({import_type}) on Python {python.version} may be slow due to missing libyaml support in PyYAML.')
env = ansible_environment(args, color=False)
env.update(
SANITY_TEMP_PATH=ResultType.TMP.path,
SANITY_IMPORTER_TYPE=import_type,
)
if data_context().content.collection:
external_python = create_sanity_virtualenv(args, args.controller_python, self.name)
env.update(
SANITY_COLLECTION_FULL_NAME=data_context().content.collection.full_name,
SANITY_EXTERNAL_PYTHON=external_python.path,
SANITY_YAML_TO_JSON=os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'yaml_to_json.py'),
ANSIBL | 92 | 1,058 | test |
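
The error-parsing step near the end of the test uses a named-group regex over the importer's stdout; the snippet below mirrors what `parse_to_list_of_dict` does with that pattern (a sketch, not ansible-test's own helper):

```python
import re

pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
stdout = "plugins/modules/foo.py:12:1: ImportError: No module named bar"

# One dict per matching line, keyed by the named groups.
parsed = [m.groupdict() for m in re.finditer(pattern, stdout, flags=re.MULTILINE)]
print(parsed)
# [{'path': 'plugins/modules/foo.py', 'line': '12', 'column': '1',
#   'message': 'ImportError: No module named bar'}]
```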
93 | 0 | 4 | 9 | website/homepage/src/guides/__init__.py | 181,227 | Use underscore instead of parentheses (#2625) | gradio | 15 | Python | 66 | __init__.py | def format_name(guide_name):
index = None
if re.match("^[0-9]+_", guide_name):
index = int(guide_name[: guide_name.index("_")])
guide_name = guide_name[guide_name.index("_") + 1 :]
if guide_name.lower().endswith(".md"):
guide_name = guide_name[:-3]
pretty_guide_name = " ".join([word[0].upper() + word[1:] for word in guide_name.split("_")])
return index, guide_name, pretty_guide_name
guide_folders = sorted(os.listdir(GUIDES_DIR))
guide_folders.remove("CONTRIBUTING.md")
guide_folders.remove("assets")
guides = []
guides_by_category = []
absolute_index = 0
for guide_folder in guide_folders:
guide_list = sorted(os.listdir(os.path.join(GUIDES_DIR, guide_folder)))
_, guide_category, pretty_guide_category = format_name(guide_folder)
guides_by_category.append({"category": pretty_guide_category, "guides": []})
for guide_file in guide_list:
guide_index, guide_name, pretty_guide_name = format_name(guide_file)
with open(os.path.join(GUIDES_DIR, guide_folder, guide_file), "r") as f:
guide_content = f.read()
title = guide_content.split("\n")[0]
metadata_labels = []
| 27d60792ead5152672d613282c8012cebc39af19 | 105 | https://github.com/gradio-app/gradio.git | 172 | def format_name(guide_name):
index = None
if re.match("^[0-9]+_", guide_name):
| 37 | 414 | format_name |
|
13 | 0 | 3 | 5 | python3.10.4/Lib/lib2to3/btm_utils.py | 218,586 | add python 3.10.4 for windows | XX-Net | 10 | Python | 12 | btm_utils.py | def get_linear_subpattern(self):
for l in self.leaves():
subp = l.leaf_to_root()
if subp:
return subp
| 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 27 | https://github.com/XX-net/XX-Net.git | 64 | def get_linear_subpattern(self):
for l in self.leaves():
subp = l.leaf_to_root( | 6 | 47 | get_linear_subpattern |
|
364 | 1 | 1 | 7 | tests/exchange/test_exchange.py | 149,749 | add "date_minus_candles" method | freqtrade | 11 | Python | 83 | test_exchange.py | def test_date_minus_candles():
date = datetime(2019, 8, 12, 13, 25, 0, tzinfo=timezone.utc)
assert date_minus_candles("5m", 3, date) == date - timedelta(minutes=15)
assert date_minus_candles("5m", 5, date) == date - timedelta(minutes=25)
assert date_minus_candles("1m", 6, date) == date - timedelta(minutes=6)
assert date_minus_candles("1h", 3, date) == date - timedelta(hours=3, minutes=25)
assert date_minus_candles("1h", 3) == timeframe_to_prev_date('1h') - timedelta(hours=3)
@pytest.mark.parametrize(
"market_symbol,base,quote,exchange,spot,margin,futures,trademode,add_dict,expected_result",
[
("BTC/USDT", 'BTC', 'USDT', "binance", True, False, False, 'spot', {}, True),
("USDT/BTC", 'USDT', 'BTC', "binance", True, False, False, 'spot', {}, True),
        # No separating /
("BTCUSDT", 'BTC', 'USDT', "binance", True, False, False, 'spot', {}, True),
("BTCUSDT", None, "USDT", "binance", True, False, False, 'spot', {}, False),
("USDT/BTC", "BTC", None, "binance", True, False, False, 'spot', {}, False),
("BTCUSDT", "BTC", None, "binance", True, False, False, 'spot', {}, False),
("BTC/USDT", "BTC", "USDT", "binance", True, False, False, 'spot', {}, True),
# Futures mode, spot pair
("BTC/USDT", "BTC", "USDT", "binance", True, False, False, 'futures', {}, False),
("BTC/USDT", "BTC", "USDT", "binance", True, False, False, 'margin', {}, False),
("BTC/USDT", "BTC", "USDT", "binance", True, True, True, 'margin', {}, True),
("BTC/USDT", "BTC", "USDT", "binance", False, True, False, 'margin', {}, True),
# Futures mode, futures pair
("BTC/USDT", "BTC", "USDT", "binance", False, False, True, 'futures', {}, True),
# Futures market
("BTC/UNK", "BTC", 'UNK', "binance", False, False, True, 'spot', {}, False),
("BTC/EUR", 'BTC', 'EUR', "kraken", True, False, False, 'spot', {"darkpool": False}, True),
("EUR/BTC", 'EUR', 'BTC', "kraken", True, False, False, 'spot', {"darkpool": False}, True),
# no darkpools
("BTC/EUR", 'BTC', 'EUR', "kraken", True, False, False, 'spot',
{"darkpool": True}, False),
# no darkpools
("BTC/EUR.d", 'BTC', 'EUR', "kraken", True, False, False, 'spot',
{"darkpool": True}, False),
("BTC/USD", 'BTC', 'USD', "ftx", True, False, False, 'spot', {}, True),
("USD/BTC", 'USD', 'BTC', "ftx", True, False, False, 'spot', {}, True),
# Can only trade spot markets
("BTC/USD", 'BTC', 'USD', "ftx", False, False, True, 'spot', {}, False),
("BTC/USD", 'BTC', 'USD', "ftx", False, False, True, 'futures', {}, True),
# Can only trade spot markets
("BTC-PERP", 'BTC', 'USD', "ftx", False, False, True, 'spot', {}, False),
("BTC-PERP", 'BTC', 'USD', "ftx", False, False, True, 'margin', {}, False),
("BTC-PERP", 'BTC', 'USD', "ftx", False, False, True, 'futures', {}, True),
("BTC/USDT:USDT", 'BTC', 'USD', "okx", False, False, True, 'spot', {}, False),
("BTC/USDT:USDT", 'BTC', 'USD', "okx", False, False, True, 'margin', {}, False),
("BTC/USDT:USDT", 'BTC', 'USD', "okx", False, False, True, 'futures', {}, True),
]) | 116b58e97cad2b86aff5e20d97f494b5ef9abd41 | @pytest.mark.parametrize(
"market_symbol,base,quote,exchange,spot,margin,futures,trademode,add_dict,expected_result",
[
("BTC/USDT", 'BTC', 'USDT', "binance", True, False, False, 'spot', {}, True),
("USDT/BTC", 'USDT', 'BTC', "binance", True, False, False, 'spot', {}, True),
# No seperating /
("BTCUSDT", 'BTC', 'USDT', "binance", True, False, False, 'spot', {}, True),
("BTCUSDT", None, "USDT", "binance", True, False, False, 'spot', {}, False),
("USDT/BTC", "BTC", None, "binance", True, False, False, 'spot', {}, False),
("BTCUSDT", "BTC", None, "binance", True, False, False, 'spot', {}, False),
("BTC/USDT", "BTC", "USDT", "binance", True, False, False, 'spot', {}, True),
# Futures mode, spot pair
("BTC/USDT", "BTC", "USDT", "binance", True, False, False, 'futures', {}, False),
("BTC/USDT", "BTC", "USDT", "binance", True, False, False, 'margin', {}, False),
("BTC/USDT", "BTC", "USDT", "binance", True, True, True, 'margin', {}, True),
("BTC/USDT", "BTC", "USDT", "binance", False, True, False, 'margin', {}, True),
# Futures mode, futures pair
("BTC/USDT", "BTC", "USDT", "binance", False, False, True, 'futures', {}, True),
# Futures market
("BTC/UNK", "BTC", 'UNK', "binance", False, False, True, 'spot', {}, False),
("BTC/EUR", 'BTC', 'EUR', "kraken", True, False, False, 'spot', {"darkpool": False}, True),
("EUR/BTC", 'EUR', 'BTC', "kraken", True, False, False, 'spot', {"darkpool": False}, True),
# no darkpools
("BTC/EUR", 'BTC', 'EUR', "kraken", True, False, False, 'spot',
{"darkpool": True}, False),
# no darkpools
("BTC/EUR.d", 'BTC', 'EUR', "kraken", True, False, False, 'spot',
{"darkpool": True}, False),
("BTC/USD", 'BTC', 'USD', "ftx", True, False, False, 'spot', {}, True),
("USD/BTC", 'USD', 'BTC', "ftx", True, False, False, 'spot', {}, True),
# Can only trade spot markets
("BTC/USD", 'BTC', 'USD', "ftx", False, False, True, 'spot', {}, False),
("BTC/USD", 'BTC', 'USD', "ftx", False, False, True, 'futures', {}, True),
# Can only trade spot markets
("BTC-PERP", 'BTC', 'USD', "ftx", False, False, True, 'spot', {}, False),
("BTC-PERP", 'BTC', 'USD', "ftx", False, False, True, 'margin', {}, False),
("BTC-PERP", 'BTC', 'USD', "ftx", False, False, True, 'futures', {}, True),
("BTC/USDT:USDT", 'BTC', 'USD', "okx", False, False, True, 'spot', {}, False),
("BTC/USDT:USDT", 'BTC', 'USD', "okx", False, False, True, 'margin', {}, False),
("BTC/USDT:USDT", 'BTC', 'USD', "okx", False, False, True, 'futures', {}, True),
]) | 121 | https://github.com/freqtrade/freqtrade.git | 650 | def test_date_minus_candles():
date = datetime(2019, 8, 12, 13, 25, 0, tzinfo=timezone.utc)
assert date_minus_candles("5m", 3, date) == date - timedelta(minutes=15)
assert date_minus_candles("5m", 5, date) == date - timedelta(minutes=25)
assert date_minus_candles("1m", 6, date) == date - timedelta(minutes=6)
assert date_minus_candles("1h", 3, date) == date - timedelta(hours=3, minutes=25)
assert date_minus_candles("1h", 3) == timeframe_to_prev_date('1h') - timedelta(hours=3)
@pytest.mark.parametrize(
"market_symbol,base,quote,exchange,spot,margin,futures,trademode,add_dict,expected_result",
[
("BTC/USDT", 'BTC', 'USDT', "binance", True, False, False, 'spot', {}, True),
("USDT/BTC", 'USDT', 'BTC', "binance", True, False, False, 'spot', {}, True),
# No seperating /
("BTCUSDT", 'BTC', 'USDT', "binance", True, False, False, 'spot', {}, True),
("BTCUSDT", None, "USDT", "binance", True, False, False, 'spot', {}, False),
("USDT/BTC", "BTC", None, "binance", True, False, False, 'spot', {}, False),
("BTCUSDT", "BTC", None, "binance", True, False, False, 'spot', {}, False),
("BTC/USDT", "BTC", "USDT", "binance", True, False, False, 'spot', {}, True),
# Futures mode, spot pair
("BTC/USDT", "BTC", "USDT", "binance", True, False, False, 'futures', {}, False),
("BTC/USDT", "BTC", "USDT", "binance", True, False, False, 'margin', {}, False),
("BTC/USDT", "BTC", "USDT", "binance", True, True, True, 'margin', {}, True),
("BTC/USDT", "BTC", "USDT", "binance", False, True, False, 'margin', {}, True),
# Futures mode, futures pair
("BTC/USDT", "BTC", "USDT", "binance", False, False, True, 'futures', {}, True),
# Futures market
("BTC/UNK", "BTC", 'UNK', "binance", False, False, True, 'spot', {}, False),
("BTC/EUR", 'BTC', 'EUR', "kraken", True, False, False, 'spot', {"darkpool": False}, True),
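
The assertions above make sense once you see that `date_minus_candles` first snaps the date to the previous candle open and then subtracts whole candles — hence `hours=3, minutes=25` for a 13:25 start on a 1h timeframe. A self-contained approximation (freqtrade's real helper takes a timeframe string like "5m"; this sketch takes minutes):

```python
from datetime import datetime, timedelta, timezone

def date_minus_candles(timeframe_min, n, date):
    # Snap to the previous candle open, then go back n whole candles.
    snapped = date - timedelta(minutes=date.minute % timeframe_min,
                               seconds=date.second,
                               microseconds=date.microsecond)
    return snapped - timedelta(minutes=n * timeframe_min)


date = datetime(2019, 8, 12, 13, 25, tzinfo=timezone.utc)
print(date_minus_candles(60, 3, date))  # 2019-08-12 10:00:00+00:00 (= date - 3h25m)
print(date_minus_candles(5, 3, date))   # 2019-08-12 13:10:00+00:00 (= date - 15m)
```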
("EUR/BTC", 'EUR', 'BTC', "kraken", True | 14 | 1,178 | test_date_minus_candles |
34 | 0 | 5 | 15 | saleor/checkout/utils.py | 28,670 | Allow to pass metadata directly to line data in checkoutCreate and checkoutLinesAdd mutations (#10592) | saleor | 16 | Python | 31 | utils.py | def _append_line_to_create(to_create, checkout, variant, line_data, line):
if line is None:
if line_data.quantity > 0:
checkout_line = CheckoutLine(
checkout=checkout,
variant=variant,
quantity=line_data.quantity,
currency=checkout.currency,
price_override=line_data.custom_price,
)
if line_data.metadata_list:
checkout_line.store_value_in_metadata(
{data.key: data.value for data in line_data.metadata_list}
)
to_create.append(checkout_line)
| 577d43f21b7d6774dd272fedbb5719e101b92308 | 87 | https://github.com/saleor/saleor.git | 211 | def _append_line_to_create(to_create, checkout, variant, line_data, line):
if line is None:
if line_data.quantity > 0:
checkout_line = CheckoutLine(
checkout=checkout,
variant=variant,
quantity=line_data.quantity,
currency=checkout.currency,
price_override=line_data.custom_price,
)
if line_data.metadata_list:
| 18 | 127 | _append_line_to_create |
|
49 | 0 | 2 | 13 | tests/integration/reduce/test_reduce.py | 11,602 | refactor: unify port args (#4382) | jina | 18 | Python | 42 | test_reduce.py | def test_uses_before_no_reduce_real_executor_uses():
flow = (
Flow(port=exposed_port)
.add(uses=Executor1, name='pod0')
.add(uses=Executor2, needs='gateway', name='pod1')
.add(uses=Executor3, needs='gateway', name='pod2')
.add(needs=['pod0', 'pod1', 'pod2'], name='pod3', uses=DummyExecutor)
)
with flow as f:
da = DocumentArray([Document() for _ in range(5)])
resp = Client(port=exposed_port, return_responses=True).post('/', inputs=da)
# assert no reduce happened
assert len(resp[0].docs) == 1
assert resp[0].docs[0].id == 'fake_document'
| 51403a57d03f0b1ddfd7fc533ccee78e23f5faa1 | 145 | https://github.com/jina-ai/jina.git | 115 | def test_uses_before_no_reduce_real_executor_uses():
flow = (
Flow(port=exposed_port)
.add(uses=Executor1, name='pod0')
.add(uses=Executor2, needs='gateway', name='pod1')
.add(uses=Executor3, needs='gateway', name='pod2')
.add(needs=['pod0', 'pod1', 'pod2'], name='pod3', uses=DummyExecutor)
)
with flow as f:
da = DocumentArray([Document() for _ in range(5)])
resp = Client(port=exposed_port, return_responses=True).post('/', inputs=da)
# assert no reduce happened
assert len(resp[0].docs) == 1
assert resp[0].docs[0].id == 'fake_document'
| 27 | 240 | test_uses_before_no_reduce_real_executor_uses |
|
96 | 0 | 4 | 51 | saleor/graphql/checkout/mutations/order_create_from_checkout.py | 28,599 | Use dataloader for discounts instead of lazy object (#10512)
* Use dataloader for discounts instead of lazy object
* Rename load function to be on par with other loaders
checkout_id = data.get("id")
checkout = cls.get_node_or_error(
info,
checkout_id,
field="id",
only_type=Checkout,
code=OrderCreateFromCheckoutErrorCode.CHECKOUT_NOT_FOUND.value,
)
tracking_code = analytics.get_client_id(info.context)
discounts = load_discounts(info.context)
manager = info.context.plugins
checkout_lines, unavailable_variant_pks = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, checkout_lines, discounts, manager
)
validate_checkout(
checkout_info=checkout_info,
lines=checkout_lines,
unavailable_variant_pks=unavailable_variant_pks,
discounts=discounts,
manager=manager,
)
app = load_app(info.context)
try:
order = create_order_from_checkout(
checkout_info=checkout_info,
checkout_lines=checkout_lines,
discounts=discounts,
manager=info.context.plugins,
user=info.context.user,
app=app,
tracking_code=tracking_code,
delete_checkout=data["remove_checkout"],
)
except NotApplicable:
code = OrderCreateFromCheckoutErrorCode.VOUCHER_NOT_APPLICABLE.value
raise ValidationError(
{
"voucher_code": ValidationError(
"Voucher not applicable",
code=code,
)
}
)
except InsufficientStock as e:
error = prepare_insufficient_stock_checkout_validation_error(e)
raise error
except GiftCardNotApplicable as e:
raise ValidationError({"gift_cards": e})
return OrderCreateFromCheckout(order=order)
| a579eb534e06a854805f037a587f13258f22fdf5 | 234 | https://github.com/saleor/saleor.git | 657 | def perform_mutation(cls, _root, info, **data):
checkout_id = data.get("id")
checkout = cls.get_node_or_error(
info,
checkout_id,
field="id",
only_type=Checkout,
code=OrderCreateFromCheckoutErrorCode.CHECKOUT_NOT_FOUND.value,
)
tracking_code = analytics.get_client_id(info.context)
discounts = load_discounts(info.context)
man | 46 | 358 | perform_mutation |
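
The switch from a lazy object to a dataloader, as the commit message describes, means discounts are fetched once per request and memoized for every later caller. The pattern, stripped to its essentials (illustrative names, not Saleor's classes):

```python
class DiscountsLoader:
    """Request-scoped loader: fetch once, memoize for later callers."""

    def __init__(self, fetch):
        self._fetch = fetch
        self._cached = None

    def load(self):
        if self._cached is None:        # first caller triggers the fetch
            self._cached = self._fetch()
        return self._cached             # everyone else hits the memo


loader = DiscountsLoader(fetch=lambda: ["10% off sale"])
assert loader.load() is loader.load()   # second call reuses the result
```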
|
9 | 0 | 1 | 3 | homeassistant/components/deconz/gateway.py | 292,674 | Make type checking pass for deCONZ init, gateway and services (#66054)
* Type and enable type checking for init, config_flow, diagnostics, gateway and services
* Fix import
* Fix review comment | core | 10 | Python | 9 | gateway.py | def master(self) -> bool:
return cast(bool, self.config_entry.options[CONF_MASTER_GATEWAY])
# Options
| dd88a05cb400d416a68a1be16fee8ee2ab48a70f | 22 | https://github.com/home-assistant/core.git | 26 | def master(self) -> bool:
retur | 7 | 37 | master |
|
13 | 0 | 1 | 11 | tests/components/homewizard/test_coordinator.py | 309,183 | Add HomeWizard Energy integration (#55812)
Co-authored-by: Matthias Alphart <farmio@alphart.net>
Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io> | core | 10 | Python | 13 | test_coordinator.py | async def test_coordinator_failed_to_update(aioclient_mock, hass):
# Update failed by internal error
meter = get_mock_device(product_type="p1_meter")
| 8f6e24aa1ea7061e9b52085deb57c49e9ccf4a86 | 55 | https://github.com/home-assistant/core.git | 22 | async def test_coordinator_failed_to_update(aioclient_mock, hass):
# Update failed by internal error
meter = get_mock_device(product_type="p | 6 | 32 | test_coordinator_failed_to_update |
|
10 | 0 | 1 | 4 | gradio/blocks.py | 179,966 | blocks-with-fix
- add support for "with gr.Blocks() as demo:" usage | gradio | 7 | Python | 7 | blocks.py | def __enter__(self):
self.parent = Context.block
Context.block = self
return self
| da4a59459f7eaa7312b50687a4a61cef1fb411d6 | 19 | https://github.com/gradio-app/gradio.git | 30 | def __enter__(self):
| 5 | 31 | __enter__ |
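
The `__enter__` shown here is what enables the `with gr.Blocks() as demo:` usage called out in the commit message: components created inside the `with` body attach to `demo` through the `Context.block` parent tracking. Typical usage, assuming the public gradio API of that era:

```python
import gradio as gr

# Blocks as a context manager: components defined inside the `with`
# body are collected onto `demo` automatically.
with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    greet = gr.Button("Greet")

demo.launch()
```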
|
36 | 0 | 2 | 16 | wagtail/admin/views/generic/models.py | 78,206 | Add breadcrumbs and new Page Editor side panels to Snippets views (#8623) | wagtail | 13 | Python | 28 | models.py | def save_instance(self):
instance = self.form.save()
revision = None
self.has_content_changes = self.form.has_changed()
# Save revision if the model inherits from RevisionMixin
if self.revision_enabled:
revision = instance.save_revision(
user=self.request.user,
changed=self.has_content_changes,
)
log(
instance=instance,
action="wagtail.edit",
revision=revision,
content_changed=self.has_content_changes,
)
return instance
| 952edd84c7a0fd9249257591bc92cd55cf59c0f8 | 78 | https://github.com/wagtail/wagtail.git | 195 | def save_instance(self):
instance = self.form.save()
revision = None
self.has_content_changes = self.form.has_changed()
# Save revision if the model inherits from RevisionMixin
if self.revision_enabled:
revision = instance.sa | 16 | 125 | save_instance |
|
132 | 0 | 8 | 39 | tests/rest/admin/test_room.py | 249,162 | Use literals in place of `HTTPStatus` constants in tests (#13469) | synapse | 15 | Python | 89 | test_room.py | def test_context_as_admin(self) -> None:
# Create a room. We're not part of it.
user_id = self.register_user("test", "test")
user_tok = self.login("test", "test")
room_id = self.helper.create_room_as(user_id, tok=user_tok)
# Populate the room with events.
events = []
for i in range(30):
events.append(
self.helper.send_event(
room_id, "com.example.test", content={"index": i}, tok=user_tok
)
)
# Now let's fetch the context for this room.
midway = (len(events) - 1) // 2
channel = self.make_request(
"GET",
"/_synapse/admin/v1/rooms/%s/context/%s"
% (room_id, events[midway]["event_id"]),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(
channel.json_body["event"]["event_id"], events[midway]["event_id"]
)
for found_event in channel.json_body["events_before"]:
for j, posted_event in enumerate(events):
if found_event["event_id"] == posted_event["event_id"]:
self.assertTrue(j < midway)
break
else:
self.fail("Event %s from events_before not found" % j)
for found_event in channel.json_body["events_after"]:
for j, posted_event in enumerate(events):
if found_event["event_id"] == posted_event["event_id"]:
self.assertTrue(j > midway)
break
else:
self.fail("Event %s from events_after not found" % j)
| c97042f7eef3748e17c90e48a4122389a89c4735 | 259 | https://github.com/matrix-org/synapse.git | 557 | def test_context_as_admin(self) -> None:
# Create a room. We're not part of it.
user_id = self.register_user("test", "test")
user_tok = self.login("test", "test")
room_id = self.helper.create_room_as(user_id, tok=user_tok)
# Populate the room with events.
events = []
for i in range(30):
events.append(
self.helper.send_event(
room_id, "com.example.test", content={"index": i}, tok=user_tok
)
)
# Now let's fetch the context for this room.
midway = (len(events) - 1) // 2
channel = self.make_request(
"GET",
"/_synapse/admin/v1/rooms/%s/context/%s"
% (room_id, events[midw | 32 | 435 | test_context_as_admin |
|
77 | 0 | 4 | 22 | airflow/migrations/versions/3c20cacc0044_add_dagrun_run_type.py | 45,457 | Autogenerate migration reference doc (#21601)
* document airflow version in each alembic migration module and use this to autogen the doc
* update each migration module to have the same description used in migration ref (so it can be used in autogen) | airflow | 18 | Python | 55 | 3c20cacc0044_add_dagrun_run_type.py | def upgrade():
run_type_col_type = sa.String(length=50)
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
dag_run_columns = [col.get('name') for col in inspector.get_columns("dag_run")]
if "run_type" not in dag_run_columns:
# Add nullable column
with op.batch_alter_table("dag_run") as batch_op:
batch_op.add_column(sa.Column("run_type", run_type_col_type, nullable=True))
# Generate run type for existing records
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=conn)
for run_type in DagRunType:
session.query(DagRun).filter(DagRun.run_id.like(f"{run_type.value}__%")).update(
{DagRun.run_type: run_type.value}, synchronize_session=False
)
session.query(DagRun).filter(DagRun.run_type.is_(None)).update(
{DagRun.run_type: DagRunType.MANUAL.value}, synchronize_session=False
)
session.commit()
# Make run_type not nullable
with op.batch_alter_table("dag_run") as batch_op:
batch_op.alter_column(
"run_type", existing_type=run_type_col_type, type_=run_type_col_type, nullable=False
)
| 69f6f9e01b6df76c3c8fa266d460324163957887 | 210 | https://github.com/apache/airflow.git | 268 | def upgrade():
run_type_col_type = sa.String(length=50)
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
dag_run_columns = [col.get('name') for col in inspector.get_columns("dag_run")]
if "run_type" not in dag_run_columns:
# Add nullable column
with op.batch_alter_table("dag_run") as batch_op:
batch_op.add_column(sa.Column("run_type", run_type_col_type, nullable=True))
# Generate run type for existing records
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=conn)
for run_type in DagRunType:
session.query(DagRun).filter(DagRun.run_id.like(f"{run_type.value}__%")).update(
{DagRun.run_type: run_type.value}, synchronize_session=False
)
session.query(DagRun).filter(DagRun.run_type.is_(None)).update(
{DagRun.run_type: DagRunType.MANUAL.value}, synchron | 40 | 360 | upgrade |
|
54 | 0 | 1 | 18 | dask/tests/test_distributed.py | 155,853 | Move creation of sqlalchemy connection for picklability (#8745) | dask | 14 | Python | 46 | test_distributed.py | async def test_to_sql_engine_kwargs(c, s, a, b):
# https://github.com/dask/dask/issues/8738
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
pytest.importorskip("sqlalchemy")
df = pd.DataFrame({"a": range(10), "b": range(10)})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=1)
with tmpfile() as f:
uri = f"sqlite:///{f}"
result = ddf.to_sql(
"test", uri, index=True, engine_kwargs={"echo": False}, compute=False
)
await c.compute(result)
dd.utils.assert_eq(
ddf,
dd.read_sql_table("test", uri, "index"),
check_divisions=False,
)
| 48820dfc4565d0dcd299409553e6721c246a9f88 | 142 | https://github.com/dask/dask.git | 163 | async def test_to_sql_engine_kwargs(c, s, a, b):
# https://github.com/dask/dask/issues/8738
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
pytest.importorskip("sqlalchemy")
df = pd.DataFrame({"a": range(10), "b": range(10)})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=1)
with tmpfile() as f:
uri = f"sqlite:///{f}"
result = ddf.to_sql(
"test" | 28 | 243 | test_to_sql_engine_kwargs |
|
40 | 0 | 4 | 12 | apps/orgs/models.py | 188,195 | fix: fix rbac to dev (#7636)
* feat: add the RBAC app module
* feat: add RBAC Model and API
* feat: add RBAC Model and API 2
* feat: add RBAC Model and API 3
* feat: add RBAC Model and API 4
* feat: RBAC
* feat: RBAC
* feat: RBAC
* feat: RBAC
* feat: RBAC
* feat: RBAC tidy up permission bits
* feat: RBAC tidy up permission bits 2
* feat: RBAC tidy up permission bits 2
* feat: RBAC tidy up permission bits
* feat: RBAC add default roles
* feat: RBAC add migration files; migrate user roles -> user role bindings
* feat: RBAC add migration files; migrate user roles -> user role bindings
* feat: RBAC update the user module API
* feat: RBAC add org module migration files & update the org module API
* feat: RBAC add org module migration files & update the org module API
* feat: RBAC update usage of user role attributes
* feat: RBAC No.1
* xxx
* perf: stash
* perf: ...
* perf(rbac): add perms to the profile serializer
* stash
* perf: use init
* perf: update migrations
* perf: rbac
* stash
* stash
* perf: update rbac
* stash it
* stash: go fix other bugs first
* perf: update role to add users
* perf: update the RBAC Model
* feat: add a tree API for permissions
* stash: stash for now
* stash: stash for now
* perf: update model verbose names
* feat: add various verbose names for models
* perf: generate migrations
* perf: optimize permission bits
* perf: add migration scripts
* feat: add org role migration
* perf: add migration scripts
* stash
* perf: add migration
* perf: stash for now
* perf: update rbac
* perf: stash it
* fix: migration conflicts
* fix: migration conflicts
* perf: stash for now
* perf: update rbac logic
* stash: stash for now
* perf: update built-in roles
* perf: fix the root org issue
* perf: stash it
* perf: optimize rbac
* perf: optimize rolebinding handling
* perf: finish handling users leaving an org
* perf: stash for now
* perf: update translations
* perf: removed IsSuperUser
* perf: finished removing IsAppUser
* perf: update connection token permissions
* perf: fix the import issue
* perf: format perms define; update app user permissions
* perf: update permission
* perf: drop some org admin
* perf: drop part of org admin
* perf: drop a bit more of the org admin role
* perf: drop more of org admin
* perf: user role search
* perf: remove a lot of js
* perf: add permission bits
* perf: update permissions
* perf: remove a todo
* merge: with dev
* fix: resolve conflicts
Co-authored-by: Bai <bugatti_it@163.com>
Co-authored-by: Michael Bai <baijiangjie@gmail.com>
Co-authored-by: ibuler <ibuler@qq.com> | jumpserver | 13 | Python | 32 | models.py | def get_total_resources_amount(self):
from django.apps import apps
from orgs.mixins.models import OrgModelMixin
summary = {'users.Members': self.get_members().count()}
for app_name, app_config in apps.app_configs.items():
models_cls = app_config.get_models()
for model in models_cls:
if not issubclass(model, OrgModelMixin):
continue
key = '{}.{}'.format(app_name, model.__name__)
summary[key] = self.get_resource_amount(model)
return summary
| e259d2a9e9167c58fa75a78d1050dd5dcfde96f4 | 94 | https://github.com/jumpserver/jumpserver.git | 160 | def get_total_resources_amount(self):
from django.apps import apps
from orgs.mixins.models import OrgModelMixin
summary = {'users.Members': self.get_members().count()}
for app_name, app_config in apps.app_configs.items():
models_cls = app_config.get_models()
for model in models_cls:
if not issubclass(model, OrgModelMixin | 23 | 151 | get_total_resources_amount |
|
28 | 0 | 1 | 7 | pandas/tests/io/excel/test_odf.py | 165,396 | BUG: error in read_excel with some ods files #45598 (#46050)
* BUG: error in read_excel with some ods files #45598
* BUG: use hasattr instead of dir
* DOC: add issue number in new test case
* DOC: remove comment
Co-authored-by: Dimitra Karadima <dkaradima@convertgroup.com> | pandas | 11 | Python | 26 | test_odf.py | def test_read_newlines_between_xml_elements_table():
# GH#45598
expected = pd.DataFrame(
[[1.0, 4.0, 7], [np.nan, np.nan, 8], [3.0, 6.0, 9]],
columns=["Column 1", "Column 2", "Column 3"],
)
result = pd.read_excel("test_newlines.ods")
tm.assert_frame_equal(result, expected)
| 004b4c58779612a91972e3d9b1ce3c8e045d8e14 | 75 | https://github.com/pandas-dev/pandas.git | 56 | def test_read_newlines_between_xml_elements_table():
# GH#45598
expected = pd.DataFrame(
[[1.0, 4.0, 7], [np.nan, np.nan, 8], [3.0, 6.0, 9]],
columns=["Column 1", "Column 2", "Column 3"],
)
result = pd.read_excel("test_newlines.ods")
tm.assert_frame_equal(result, expected)
| 11 | 102 | test_read_newlines_between_xml_elements_table |
|
60 | 0 | 2 | 10 | tests/test_iterable_dataset.py | 105,223 | Add `concatenate_datasets` for iterable datasets (#4500)
* add concatenate_datasets for iterable datasets
* fix
* infer features
* fill missing rows and columns
* comments
* only check for duplicate keys once
* comments
* keep concatenate_datasets in arrow_dataset (to be deprecated)
* style
* comments, typing, fix missing token_per_repo_id
* style | datasets | 11 | Python | 44 | test_iterable_dataset.py | def test_concatenate_datasets_with_different_columns():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {})
dataset2 = IterableDataset(ex_iterable2)
# missing column "label" -> it should be replaced with nulls
extended_dataset2_list = [{"label": None, **x} for x in dataset2]
concatenated_dataset = concatenate_datasets([dataset1, dataset2])
assert list(concatenated_dataset) == list(dataset1) + extended_dataset2_list
# change order
concatenated_dataset = concatenate_datasets([dataset2, dataset1])
assert list(concatenated_dataset) == extended_dataset2_list + list(dataset1)
| f5826eff9b06ab10dba1adfa52543341ef1e6009 | 97 | https://github.com/huggingface/datasets.git | 92 | def test_concatenate_datasets_with_different_columns():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = It | 13 | 157 | test_concatenate_datasets_with_different_columns |
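
The null-filling rule the test checks — rows from the dataset that lacks "label" come back with `label=None` after concatenation — reduces to this dict-level behaviour. A library-free sketch of the rule, not the datasets implementation:

```python
all_columns = ["id", "label"]  # union of columns across the datasets

def fill_missing(example):
    # Rows lacking a column get it back filled with None, mirroring
    # what the concatenated iterable dataset yields.
    return {col: example.get(col) for col in all_columns}


print(fill_missing({"id": 0, "label": 10}))  # {'id': 0, 'label': 10}
print(fill_missing({"id": 1}))               # {'id': 1, 'label': None}
```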
|
133 | 0 | 1 | 16 | sympy/solvers/tests/test_decompogen.py | 196,954 | made changes | sympy | 16 | Python | 64 | test_decompogen.py | def test_decompogen():
assert decompogen(sin(cos(x)), x) == [sin(x), cos(x)]
assert decompogen(sin(x)**2 + sin(x) + 1, x) == [x**2 + x + 1, sin(x)]
assert decompogen(sqrt(6*x**2 - 5), x) == [sqrt(x), 6*x**2 - 5]
assert decompogen(sin(sqrt(cos(x**2 + 1))), x) == [sin(x), sqrt(x), cos(x), x**2 + 1]
assert decompogen(Abs(cos(x)**2 + 3*cos(x) - 4), x) == [Abs(x), x**2 + 3*x - 4, cos(x)]
assert decompogen(sin(x)**2 + sin(x) - sqrt(3)/2, x) == [x**2 + x - sqrt(3)/2, sin(x)]
assert decompogen(Abs(cos(y)**2 + 3*cos(x) - 4), x) == [Abs(x), 3*x + cos(y)**2 - 4, cos(x)]
assert decompogen(x, y) == [x]
assert decompogen(1, x) == [1]
assert decompogen(Max(3, x), x) == [Max(3, x)]
raises(TypeError, lambda: decompogen(x < 5, x))
u = 2*x + 3
assert decompogen(Max(sqrt(u),(u)**2), x) == [Max(sqrt(x), x**2), u]
assert decompogen(Max(u, u**2, y), x) == [Max(x, x**2, y), u]
| bbca83fd553f5f14251ab08ae06cbd7524d2bbc1 | 430 | https://github.com/sympy/sympy.git | 174 | def test_decompogen():
assert decompogen(sin(cos(x)), x) == [sin(x), cos(x)]
assert decompogen(sin(x)**2 + sin(x) + 1, x) == [x**2 + x + 1, sin(x)]
assert decompogen(sqrt(6*x**2 - 5), x) == [sqrt(x), 6*x**2 - 5]
assert decompogen(sin(sqrt(cos(x**2 + 1))), x) == [sin(x), sqrt(x), cos(x), x**2 + 1]
assert decompogen(Abs(cos(x)**2 + 3*cos(x) - 4), x) == [Abs(x), x**2 + 3*x - 4, cos(x)]
assert decompogen(sin(x)**2 + sin(x) - sqrt(3)/2, x) == [x**2 + x - sqrt(3)/2, sin(x)]
assert decompogen(Abs(cos(y)**2 + 3*cos(x) - 4), x) == [Abs(x), 3*x + cos(y)**2 - 4, cos(x)]
assert decompogen(x, y) == [x]
assert decompogen(1, x) == [1]
assert decompogen | 12 | 605 | test_decompogen |
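
`decompogen` returns the outermost-to-innermost pieces of a composition, so recomposing them with its counterpart `compogen` recovers the original expression. A quick round-trip check:

```python
from sympy import sin, cos, symbols
from sympy.solvers.decompogen import decompogen, compogen

x = symbols('x')
parts = decompogen(sin(cos(x)), x)   # [sin(x), cos(x)]
print(parts)
print(compogen(parts, x))            # sin(cos(x)): the round trip closes
```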
|
100 | 0 | 1 | 26 | pandas/tests/groupby/test_groupby.py | 171,540 | DEPR: Enforce numeric_only=False in groupby sum/mean (#49829)
* DEPR: Enforce numeric_only=False in groupby sum/mean
* cleanup
* Refinements
* whatsnew fixup | pandas | 12 | Python | 46 | test_groupby.py | def test_frame_set_name_single(df):
grouped = df.groupby("A")
msg = "The default value of numeric_only"
with pytest.raises(TypeError, match="Could not convert"):
grouped.mean()
result = grouped.mean(numeric_only=True)
assert result.index.name == "A"
with pytest.raises(TypeError, match="Could not convert"):
df.groupby("A", as_index=False).mean()
result = df.groupby("A", as_index=False).mean(numeric_only=True)
assert result.index.name != "A"
with pytest.raises(TypeError, match="Could not convert"):
grouped.agg(np.mean)
result = grouped[["C", "D"]].agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
| 3fffb6d49abe20ebc4a3380181f90103fb9ce22e | 280 | https://github.com/pandas-dev/pandas.git | 190 | def test_frame_set_name_single(df):
grouped = df.groupby("A")
msg = "The default value of numeric_only"
with pytest.raises(TypeError, match="Could not convert"):
grouped.mean()
result = grouped.mean(numeric_only=True)
assert result.index.name == "A"
with pytest.raises(TypeError, match="Could not convert"):
df.groupby("A", as_index=False).mean()
result = df.groupby("A", as_index=False).mean(numeric_only=True)
assert result.index.name != "A"
with pytest.raises(TypeError, match="Could not convert"):
grouped.agg(np.mean)
result = grouped[["C", "D"]].agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(Specificatio | 19 | 485 | test_frame_set_name_single |
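
The deprecation this test enforces: once a grouped frame contains non-numeric columns, `mean()` raises instead of silently dropping them, unless the caller opts in with `numeric_only=True`. A minimal reproduction, with the behaviour of the pandas release this patch targets (2.0):

```python
import pandas as pd

df = pd.DataFrame({"A": ["x", "x", "y"],
                   "B": [1, 2, 3],
                   "C": ["a", "b", "c"]})

# Raises TypeError because column C cannot be converted to numeric.
try:
    df.groupby("A").mean()
except TypeError as err:
    print(err)

# Opting in averages only the numeric column B.
print(df.groupby("A").mean(numeric_only=True))
```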
|
56 | 0 | 3 | 13 | dask/dataframe/io/tests/test_sql.py | 155,698 | Add engine_kwargs support to dask.dataframe.to_sql (#8609)
### Use case: SSL
This was the original [use case] that motivated #8596: to force SSL on the db connection. Whether the new `engine_kwargs` argument is helpful/necessary for this use case depends on the db driver used by SQLAlchemy:
* [MySQL] (helpful): either use `connect_args={'ssl': ...}` as engine kwarg or add `?ssl_cert=...&ssl_key=...` to URI.
* [psycopg2] (not helpful): must use `?sslmode=require` in connection URI, not supported as engine argument.
* [pg8000] (necessary): must use `connect_args={'ssl_context': ...}` as engine kwarg.
[use case]: https://github.com/coiled/dask-community/issues/186
[MySQL]: https://docs.sqlalchemy.org/en/14/dialects/mysql.html#ssl-connections
[psycopg2]: https://docs.sqlalchemy.org/en/14/dialects/postgresql.html#ssl-connections
[pg8000]: https://docs.sqlalchemy.org/en/14/dialects/postgresql.html#pg8000-ssl | dask | 13 | Python | 29 | test_sql.py | def test_to_sql_engine_kwargs(caplog):
ddf = dd.from_pandas(df, 2)
with tmp_db_uri() as uri:
ddf.to_sql("test", uri, engine_kwargs={"echo": False})
logs = "\n".join(r.message for r in caplog.records)
assert logs == ""
assert_eq(df, read_sql_table("test", uri, "number"))
with tmp_db_uri() as uri:
ddf.to_sql("test", uri, engine_kwargs={"echo": True})
logs = "\n".join(r.message for r in caplog.records)
assert "CREATE" in logs
assert "INSERT" in logs
assert_eq(df, read_sql_table("test", uri, "number"))
| 80f9821f4d5a4badc2179dbd17f1fc7730cc9f50 | 131 | https://github.com/dask/dask.git | 127 | def test_to_sql_engine_kwargs(caplog):
ddf = dd.from_pandas(df, 2)
with tmp_db_uri() as uri:
ddf.to_sql("test", uri, engine_kwargs={"echo": False})
logs = "\n".join(r.message for r in caplog.records)
assert logs == ""
assert_eq(df, read_sql_table("test", uri, "number"))
with tmp_db_uri() as uri:
ddf.to_sql("test", uri, engine_kwargs={"echo": True})
logs = "\n".join(r.message for r in caplog.records)
assert "CREATE" in logs
| 17 | 230 | test_to_sql_engine_kwargs |
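To make the SSL use case above concrete, here is a hedged sketch of passing driver-level SSL settings through `engine_kwargs` (the URI, CA path, and table name are invented; only the `engine_kwargs` parameter itself comes from this commit):

import ssl

import dask.dataframe as dd
import pandas as pd

df = pd.DataFrame({"number": range(10), "value": range(10)})
ddf = dd.from_pandas(df, npartitions=2)

# pg8000 only accepts an SSLContext via connect_args, so engine_kwargs is
# the only way to force SSL with that driver (see the bullet list above).
ctx = ssl.create_default_context(cafile="/path/to/ca.pem")
ddf.to_sql(
    "my_table",
    "postgresql+pg8000://user:password@host/db",
    engine_kwargs={"connect_args": {"ssl_context": ctx}},
)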
|
13 | 0 | 1 | 8 | tests/db_functions/comparison/test_greatest.py | 202,585 | Refs #33476 -- Reformatted code with Black. | django | 15 | Python | 13 | test_greatest.py | def test_decimal_filter(self):
obj = DecimalModel.objects.create(n1=Decimal("1.1"), n2=Decimal("1.2"))
self.assertCountEqual(
DecimalModel.objects.annotate(
greatest=Greatest("n1", "n2"),
).filter(greatest=Decimal("1.2")),
[obj],
)
| 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | 63 | https://github.com/django/django.git | 81 | def test_decimal_filter(self):
obj = DecimalModel.objects.create(n1=Decimal("1.1"), n2=Decimal("1.2"))
self.assertCountEqual(
DecimalModel.objects.annotate(
greatest=Greatest("n1", "n2"),
). | 14 | 107 | test_decimal_filter |
|
17 | 0 | 3 | 8 | django/db/models/lookups.py | 205,690 | Refs #33476 -- Reformatted code with Black. | django | 11 | Python | 13 | lookups.py | def get_bilateral_transforms(self):
if hasattr(self.lhs, "get_bilateral_transforms"):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if self.bilateral:
bilateral_transforms.append(self.__class__)
return bilateral_transforms
| 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | 45 | https://github.com/django/django.git | 77 | def get_bilateral_transforms(self):
if hasattr(self.lhs, "get_bilateral_transforms"):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if self.bilateral:
bilateral_transforms.append(self.__class__)
return bilateral_transforms
| 8 | 76 | get_bilateral_transforms |
|
12 | 0 | 1 | 3 | saleor/graphql/order/schema.py | 26,978 | Stricter signatures for resolvers and mutations (#9649) | saleor | 8 | Python | 11 | schema.py | def resolve_homepage_events(_root, info, **kwargs):
qs = resolve_homepage_events()
return create_connection_slice(qs, info, kwargs, OrderEventCountableConnection)
| 513fc80bc698c177b87774b3aff3da7b9aedbe06 | 26 | https://github.com/saleor/saleor.git | 25 | def resolve_homepage_events(_root, info, **kwargs):
qs = resolve_homepage_events()
return create_connection_slice(qs, info, kwargs, OrderEventCountableConnection)
| 7 | 38 | resolve_homepage_events |
|
18 | 0 | 1 | 7 | zerver/tests/test_digest.py | 84,027 | testing: 100% coverage for zerver/tests/test_digest.py. | zulip | 11 | Python | 15 | test_digest.py | def test_no_logging(self) -> None:
hamlet = self.example_user("hamlet")
startlen = len(RealmAuditLog.objects.all())
bulk_write_realm_audit_logs([])
self.assert_length(RealmAuditLog.objects.all(), startlen)
bulk_write_realm_audit_logs([hamlet])
self.assert_length(RealmAuditLog.objects.all(), startlen + 1)
| ba5cf331a2c65bc5d09be28892327e59698eda0e | 68 | https://github.com/zulip/zulip.git | 59 | def test_no_logging(self) -> None:
hamlet = self.example_user("hamlet")
startlen = len(RealmAuditLog.objects.all())
bulk_write_realm_audit_logs([])
self.a | 11 | 113 | test_no_logging |
|
20 | 0 | 1 | 10 | langchain/llms/cohere.py | 191,459 | model laboratory (#95) | langchain | 8 | Python | 20 | cohere.py | def _default_params(self) -> Mapping[str, Any]:
return {
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"k": self.k,
"p": self.p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
}
| db37bd089fc18c8215da42202dfadc397b20d26c | 52 | https://github.com/hwchase17/langchain.git | 107 | def _default_params(self) -> Mapping[str, Any]:
return {
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"k": self.k,
"p": self.p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
}
| 11 | 87 | _default_params |
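The `_default_params` mapping above exists so the wrapper's attributes can be merged into each API request. A standalone sketch of that merge pattern (the `stop_sequences` key is an assumption about the Cohere API, not confirmed by this snippet):

def build_request(defaults, prompt, stop=None):
    # Per-call overrides win over the wrapper defaults.
    params = {**defaults, "stop_sequences": stop or []}
    return {"prompt": prompt, **params}

defaults = {
    "max_tokens": 256,
    "temperature": 0.75,
    "k": 0,
    "p": 1,
    "frequency_penalty": 0,
    "presence_penalty": 0,
}
print(build_request(defaults, "Hello world", stop=["\n"]))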
|
150 | 0 | 23 | 78 | openbb_terminal/portfolio/portfolio_controller.py | 286,488 | Feature/attribution toolkit (#3156)
* add attribution toolkit
* add attrib to test script for portfolio
* removes yahooquery dependency and early rounding
* Update _index.md
* update feature to include raw and type flags; the graph always shows, table output is optional, and only one output type is produced at a time
* Linting
* Update index
* Update index 2
* Update tests
* changes argument descriptions
* Small fix
* Formatting Black
Co-authored-by: S3908818 <S3908818@student.rmit.edu.au>
Co-authored-by: Louise Platts (S3908818) <88080425+s3908818@users.noreply.github.com>
Co-authored-by: Jeroen Bouma <jer.bouma@gmail.com>
Co-authored-by: James Maslek <jmaslek11@gmail.com>
Co-authored-by: Louise Amy <74476622+louiseamy4@users.noreply.github.com>
Co-authored-by: Jeroen Bouma <jeroenbouma@Jeroens-MacBook-Pro.local> | OpenBBTerminal | 11 | Python | 73 | portfolio_controller.py | def print_help(self):
mt = MenuText("portfolio/")
mt.add_menu("bro")
mt.add_menu("po")
mt.add_raw("\n")
mt.add_cmd("load")
mt.add_cmd("show")
mt.add_cmd("bench")
mt.add_raw("\n")
mt.add_param("_loaded", self.portfolio_name)
mt.add_param("_riskfreerate", self.portfolio_name)
mt.add_param("_benchmark", self.benchmark_name)
mt.add_raw("\n")
mt.add_info("_graphs_")
mt.add_cmd("holdv", self.portfolio_name and self.benchmark_name)
mt.add_cmd("holdp", self.portfolio_name and self.benchmark_name)
mt.add_cmd("yret", self.portfolio_name and self.benchmark_name)
mt.add_cmd("mret", self.portfolio_name and self.benchmark_name)
mt.add_cmd("dret", self.portfolio_name and self.benchmark_name)
mt.add_cmd("distr", self.portfolio_name and self.benchmark_name)
mt.add_cmd("maxdd", self.portfolio_name and self.benchmark_name)
mt.add_cmd("rvol", self.portfolio_name and self.benchmark_name)
mt.add_cmd("rsharpe", self.portfolio_name and self.benchmark_name)
mt.add_cmd("rsort", self.portfolio_name and self.benchmark_name)
mt.add_cmd("rbeta", self.portfolio_name and self.benchmark_name)
mt.add_info("_metrics_")
mt.add_cmd("alloc", self.portfolio_name and self.benchmark_name)
mt.add_cmd("attrib", self.portfolio_name and self.benchmark_name)
mt.add_cmd("summary", self.portfolio_name and self.benchmark_name)
mt.add_cmd("alloc", self.portfolio_name and self.benchmark_name)
mt.add_cmd("attrib", self.portfolio_name and self.benchmark_name)
mt.add_cmd("metric", self.portfolio_name and self.benchmark_name)
mt.add_cmd("perf", self.portfolio_name and self.benchmark_name)
mt.add_info("_risk_")
mt.add_cmd("var", self.portfolio_name and self.benchmark_name)
mt.add_cmd("es", self.portfolio_name and self.benchmark_name)
mt.add_cmd("os", self.portfolio_name and self.benchmark_name)
port = bool(self.portfolio_name)
port_bench = bool(self.portfolio_name) and bool(self.benchmark_name)
help_text = f
# TODO: Clean up the reports inputs
# TODO: Edit the allocation to allow the different asset classes
# [info]Reports:[/info]
# ar annual report for performance of a given portfolio
console.print(text=help_text, menu="Portfolio")
self.update_choices()
| aed683f44015cb5aa6cae9c2ce719c956cda7b46 | 446 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 484 | def print_help(self):
mt = MenuText("portfolio/")
mt.add_menu("bro")
mt.add_menu("po")
mt.add_raw("\n")
mt.add_cmd("load")
mt.add_cmd("show")
mt.add_cmd("bench")
mt.add_raw("\n")
mt.add_param("_loaded", self.portfolio_name)
mt.add_param("_riskfreerate", self.portfolio_name)
mt.add_param("_benchmark", self.benchmark_name)
mt.add_raw("\n")
mt.add_info("_graphs_")
mt.add_cmd("holdv", self.portfolio_name and self.benchmark_name)
mt.add_cmd("holdp", self.portfolio_name and self.benchmark_name)
mt.add_cmd("yret", self.portfolio_name and self.benchmark_name)
mt.add_cmd("mret", self.portfolio_name and self.benchmark_name)
mt.add_cmd("dret", self.portfolio_name and self.benchmark_name)
mt.add_cmd("distr", self.portfolio_name and self.benchmark_name)
mt.add_cmd("maxdd", self.portfolio_name and self.benchmark_name)
mt.add_cmd("rvol", self.portfolio_name and self.benchmark_name)
mt.add_cmd("rsharpe", self.portfolio_name and self.benchmark_name)
mt.add_cmd("rsort", self.portfolio_name and self.benchmark_name)
mt.add_cmd("rbeta", self.portfolio_name and self.benchmark_name)
mt.add_info("_metrics_")
mt.add_cmd("alloc", self.portfolio_name and self.benchmark_name)
mt.add_cmd("attrib", self.portfolio_name and self.benchmark_name)
mt.add_cmd("summary", self.portfolio_name and self.benchmark_name)
mt.add_cmd("alloc", self.portfolio_name and self.benchmark_name)
mt.add_cmd("attrib", self.portfolio_name and self.benchmark_name)
mt.add_cmd("metric", self.portfolio_name and self.benchmark_name)
mt.add_cmd("perf", self.portfolio_name and self.benchmark_name)
mt.add_info("_risk_")
mt.add_cmd("var", self.portfolio_name and self.benchmark_name)
mt.add_cmd("es", self.portfolio_name and self.benchmark_name)
mt.add_cmd("os", self.portfolio_name and self.benchmark_name)
port = bool(self.portfolio_name)
port_bench = bool(self.portfolio_name) and bool(self.benchmark_name)
help_text = f
# TODO: Clean up the reports inputs
# TODO: Edit the allocation to allow the different asset classes
# [info]Reports:[/info]
# ar annual report for performance of a given portfolio
console.print(text=help_text, menu | 21 | 899 | print_help |
|
47 | 1 | 2 | 17 | test/test_prototype_transforms_functional.py | 194,246 | Cleanup prototype transforms tests (#6984)
* minor cleanup of the prototype transforms tests
* refactor ImagePair
* pretty format enum | vision | 12 | Python | 42 | test_prototype_transforms_functional.py | def test_float32_vs_uint8(self, test_id, info, args_kwargs):
(input, *other_args), kwargs = args_kwargs.load("cpu")
if input.dtype != torch.uint8:
pytest.skip(f"Input dtype is {input.dtype}.")
adapted_other_args, adapted_kwargs = info.float32_vs_uint8(other_args, kwargs)
actual = info.kernel(
F.convert_dtype_image_tensor(input, dtype=torch.float32),
*adapted_other_args,
**adapted_kwargs,
)
expected = F.convert_dtype_image_tensor(info.kernel(input, *other_args, **kwargs), dtype=torch.float32)
assert_close(
actual,
expected,
**info.get_closeness_kwargs(test_id, dtype=torch.float32, device=input.device),
msg=parametrized_error_message(*other_args, **kwargs),
)
@pytest.fixture | 74ea933c29898f838991acdf49a70cb20b4ec3ad | @pytest.fixture | 143 | https://github.com/pytorch/vision.git | 189 | def test_float32_vs_uint8(self, test_id, info, args_kwargs):
| 29 | 227 | test_float32_vs_uint8 |
11 | 0 | 1 | 4 | tests/api/common/test_mark_tasks.py | 43,992 | Use `DagRun.run_id` instead of `execution_date` when updating state of TIs(UI & REST API) (#18724)
We can now use run_id as well as execution_date to update states
of task instances
Co-authored-by: Tzu-ping Chung <uranusjr@gmail.com>
Co-authored-by: Ash Berlin-Taylor <ash_github@firemirror.com> | airflow | 9 | Python | 11 | test_mark_tasks.py | def _create_test_dag_run(self, state, date):
return self.dag1.create_dagrun(
run_type=DagRunType.MANUAL, state=state, start_date=date, execution_date=date
)
| 2b4bf7fe67fc656ceb7bdaad36453b7a5b83ef04 | 34 | https://github.com/apache/airflow.git | 35 | def _create_test_dag_run(self, state, date):
return self.dag1.create_dagrun(
run_type=DagRunType.MANUAL, | 11 | 48 | _create_test_dag_run |
|
13 | 0 | 2 | 7 | python3.10.4/Lib/asyncio/queues.py | 220,623 | add python 3.10.4 for windows | XX-Net | 8 | Python | 13 | queues.py | def put_nowait(self, item):
if self.full():
raise QueueFull
self._put(item)
self._unfinished_tasks += 1
self._finished.clear()
self._wakeup_next(self._getters)
| 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 43 | https://github.com/XX-net/XX-Net.git | 66 | def put_nowait(self, item):
| 11 | 73 | put_nowait |
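`put_nowait` raises instead of blocking once the queue holds `maxsize` items, so callers either catch `QueueFull` or fall back to the awaitable `put`. A minimal runnable sketch:

import asyncio

async def main():
    q = asyncio.Queue(maxsize=1)
    q.put_nowait("first")        # fits: one free slot
    try:
        q.put_nowait("second")   # raises: queue is already full
    except asyncio.QueueFull:
        q.get_nowait()           # drain one item to make room
        q.put_nowait("second")
    print(q.qsize())             # 1

asyncio.run(main())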
|
24 | 0 | 2 | 7 | src/datasets/arrow_writer.py | 105,908 | Multiprocessed dataset builder [WIP] (#5107)
* multiprocessing-compatible naming scheme and refactor
* multiprocessed shard writing for GeneratorBasedBuilder
* multiprocessed shard writing for ArrowBasedBuilder
* style
* multiprocessed dataset loading
* compatibility with non-sharded datasets
* bugfix
* bugfix
* removed unused import
* fixed bad ordering
* less misleading tqdm
* fix gen_kwargs distribution + read shards
* minor
* minor2
* support beam datasets
* docstrings + minor
* add iflatmap_unordered for parallel write & progress updates
* use 1 tqdm bar receiving updates from subprocesses
* docs
* add test_iflatmap_unordered
* style
* test arrow_reader.py
* fix test_iflatmap_unordered
* add Beam test_download_and_prepare_sharded
* test gen_kwargs distribution
* test download_and_prepare with num_proc
* style
* improve test
* don't close the pool
* fix multiprocessing on windows
* keep multiprocessing disabled by default
* again + docs
* more docs
* more docs
* some var renaming
* style
* Apply suggestions from code review
Co-authored-by: Mario Šaško <mariosasko777@gmail.com>
* Apply suggestions from code review
Co-authored-by: Mario Šaško <mariosasko777@gmail.com>
* added utils/sharding.py
* style
* style
Co-authored-by: Quentin Lhoest <lhoest.q@gmail.com>
Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
Co-authored-by: Mario Šaško <mariosasko777@gmail.com> | datasets | 11 | Python | 21 | arrow_writer.py | def get_parquet_lengths(sources) -> List[int]:
shard_lengths = []
disable = not logging.is_progress_bar_enabled()
for source in logging.tqdm(sources, unit="parquet files", disable=disable):
parquet_file = pa.parquet.ParquetFile(source)
shard_lengths.append(parquet_file.metadata.num_rows)
return shard_lengths
| 2945690ea731f85a356220a71cdc630281c676f4 | 62 | https://github.com/huggingface/datasets.git | 49 | def get_parquet_lengths(sources) -> List[int]:
shard_lengths = []
disable = not logging.is_progress_bar_enabled()
for source in logging.tqdm(sources, unit=" | 18 | 99 | get_parquet_lengths |
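The per-shard lengths above come straight from the Parquet footer, so no data pages are deserialized. A self-contained sketch of the same idea:

import pyarrow as pa
import pyarrow.parquet as pq

# Write a tiny shard, then read its row count from the footer metadata only.
table = pa.table({"x": list(range(1000))})
pq.write_table(table, "shard-00000.parquet")

num_rows = pq.ParquetFile("shard-00000.parquet").metadata.num_rows
print(num_rows)  # 1000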
|
41 | 0 | 1 | 15 | keras/datasets/mnist.py | 279,397 | Add f-string format and lint with flynt on the whole codebase | keras | 11 | Python | 37 | mnist.py | def load_data(path="mnist.npz"):
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
path,
origin=origin_folder + "mnist.npz",
file_hash=( # noqa: E501
"731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1"
),
)
with np.load(path, allow_pickle=True) as f:
x_train, y_train = f["x_train"], f["y_train"]
x_test, y_test = f["x_test"], f["y_test"]
return (x_train, y_train), (x_test, y_test)
| be73ac1a1e25d9abd4d793cba9707098d7adf231 | 84 | https://github.com/keras-team/keras.git | 127 | def load_data(path="mnist.npz"):
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
path,
origin=origin_folder + "mnist.npz",
file_hash=( # noqa: E501
"731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1"
),
)
with np.load(path, allow_pickle=True) as f:
x_train, y_train = f["x_train"] | 14 | 146 | load_data |
|
53 | 0 | 3 | 19 | python/ray/tune/tests/test_checkpoint_manager.py | 147,994 | [tune] Treat checkpoints with nan value as worst (#23862)
Changes the logic in CheckpointManager to treat checkpoints with a NaN metric value as the worst checkpoints, meaning they will be deleted first if keep_checkpoints_num is set. | ray | 14 | Python | 44 | test_checkpoint_manager.py | def testBestCheckpointsWithNan(self):
keep_checkpoints_num = 2
checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num)
checkpoints = [
_TuneCheckpoint(
_TuneCheckpoint.PERSISTENT, None, self.mock_result(float("nan"), i)
)
for i in range(2)
]
checkpoints += [
_TuneCheckpoint(_TuneCheckpoint.PERSISTENT, 3, self.mock_result(0, 3))
]
random.shuffle(checkpoints)
for checkpoint in checkpoints:
checkpoint_manager.on_checkpoint(checkpoint)
best_checkpoints = checkpoint_manager.best_checkpoints()
# best_checkpoints is sorted from worst to best
self.assertEqual(len(best_checkpoints), keep_checkpoints_num)
self.assertEqual(best_checkpoints[0].value, None)
self.assertEqual(best_checkpoints[1].value, 3)
| 52eaf020bcd4e8ebeb94af11a8039313a37488d1 | 130 | https://github.com/ray-project/ray.git | 221 | def testBestCheckpointsWithNan(self):
keep_checkpoints_num = 2
checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num)
checkpoints = [
_TuneCheckpoint(
_TuneCheckpoint.PERSISTENT, None, self.mock_result(float("nan"), i)
)
for i in range(2)
]
checkpoints += [
_TuneCheckpoint(_TuneCheckpoint.PERSISTENT, 3, self.mock_result(0, 3))
]
random.shuffle(checkpoints)
for checkpoint in checkpoints:
checkpoint_manager.on_checkpoint(checkpoint)
best_checkpoints = checkpoint_manager.best_checkpoints | 19 | 203 | testBestCheckpointsWithNan |
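The behavior under test (NaN metrics ranking as the worst checkpoints) can be illustrated with a sort key that maps NaN to negative infinity; this is a sketch of the idea, not Ray's actual implementation:

import math

def checkpoint_sort_key(value):
    # NaN compares as neither smaller nor larger than anything, so map it
    # explicitly to -inf to force NaN-valued checkpoints to rank worst.
    if value is None or math.isnan(value):
        return float("-inf")
    return value

metrics = [float("nan"), 3.0, float("nan")]
ranked = sorted(metrics, key=checkpoint_sort_key)  # worst -> best
# With keep_checkpoints_num=2, everything before the last two is deleted,
# so the NaN-valued checkpoints go first.
print(ranked[-2:])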
|
182 | 0 | 13 | 51 | nuitka/freezer/Standalone.py | 178,612 | macOS: Massive improvements for dependency scans
* Was not recursively scanning dependencies and therefore could
miss some of them.
* Made internal functions private.
* Make sure to pass proper "package" value to DLL scans, so it
can include the needed directories.
* Do not mutate information of DLL map, it is used later for
other things and we now detect errors in that. | Nuitka | 17 | Python | 119 | Standalone.py | def copyDllsUsed(source_dir, dist_dir, standalone_entry_points):
# This is complex, because we also need to handle OS specifics.
used_dlls = detectUsedDLLs(
source_dir=source_dir,
standalone_entry_points=standalone_entry_points,
use_cache=not Options.shallNotUseDependsExeCachedResults()
and not Options.getWindowsDependencyTool() == "depends.exe",
update_cache=not Options.shallNotStoreDependsExeCachedResults()
and not Options.getWindowsDependencyTool() == "depends.exe",
)
_removeDuplicateDlls(used_dlls=used_dlls)
dll_map = _copyDllsUsed(dist_dir=dist_dir, used_dlls=used_dlls)
# TODO: This belongs inside _copyDllsUsed
if Utils.isMacOS():
# For macOS, the binary and the DLLs need to be changed to reflect
# the relative DLL location in the ".dist" folder.
for standalone_entry_point in standalone_entry_points:
_fixupBinaryDLLPathsMacOS(
binary_filename=standalone_entry_point.dest_path,
package_name=standalone_entry_point.package_name,
dll_map=dll_map,
original_location=standalone_entry_point.source_path,
)
for original_path, package_name, dll_filename in dll_map:
_fixupBinaryDLLPathsMacOS(
binary_filename=os.path.join(dist_dir, dll_filename),
package_name=package_name,
dll_map=dll_map,
original_location=original_path,
)
# Remove or update rpath settings.
if Utils.getOS() in ("Linux", "Darwin"):
# For Linux, the "rpath" of libraries may be an issue and must be
# removed.
if Utils.isMacOS():
start = 0
else:
start = 1
for standalone_entry_point in standalone_entry_points[start:]:
count = relpath(
path=standalone_entry_point.dest_path, start=dist_dir
).count(os.path.sep)
rpath = os.path.join("$ORIGIN", *([".."] * count))
setSharedLibraryRPATH(standalone_entry_point.dest_path, rpath)
for _original_path, _package_name, dll_filename in dll_map:
setSharedLibraryRPATH(os.path.join(dist_dir, dll_filename), "$ORIGIN")
if Utils.isMacOS():
addMacOSCodeSignature(
filenames=[
standalone_entry_point.dest_path
for standalone_entry_point in standalone_entry_points
]
+ [
os.path.join(dist_dir, dll_filename)
for _original_path, _package_name, dll_filename in dll_map
]
)
Plugins.onCopiedDLLs(dist_dir=dist_dir, used_dlls=used_dlls)
| a470b75c8e045312ea22dbfb6c5fc6702835b31c | 315 | https://github.com/Nuitka/Nuitka.git | 700 | def copyDllsUsed(source_dir, dist_dir, standalone_entry_points):
# This is complex, because we also need to handle OS specifics.
used_dlls = detectUsedDLLs(
source_dir=source_dir,
standalone_entry_points=standalone_entry_points,
use_cache=not Options.shallNotUseDependsExeCachedResults() | 42 | 493 | copyDllsUsed |
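The `$ORIGIN` rpath computed in the Linux branch above depends only on how deep the binary sits below the dist folder. A standalone sketch of that calculation (paths invented):

import os

def origin_rpath(binary_path, dist_dir):
    # One ".." per directory level between the binary and dist_dir, so the
    # dynamic loader resolves shared libraries relative to the dist folder.
    depth = os.path.relpath(binary_path, start=dist_dir).count(os.path.sep)
    return os.path.join("$ORIGIN", *([".."] * depth))

print(origin_rpath("/tmp/app.dist/pkg/sub/mod.so", "/tmp/app.dist"))
# $ORIGIN/../..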
|
103 | 0 | 13 | 31 | homeassistant/components/samsungtv/media_player.py | 294,559 | Add Upnp volume control/status to SamsungTV (#68663)
Co-authored-by: epenet <epenet@users.noreply.github.com>
Co-authored-by: J. Nick Koston <nick@koston.org> | core | 15 | Python | 65 | media_player.py | async def async_update(self) -> None:
if self._auth_failed or self.hass.is_stopping:
return
if self._power_off_in_progress():
self._attr_state = STATE_OFF
else:
self._attr_state = (
STATE_ON if await self._bridge.async_is_on() else STATE_OFF
)
if self._attr_state != STATE_ON:
return
startup_tasks: list[Coroutine[Any, Any, None]] = []
if not self._app_list_event.is_set():
startup_tasks.append(self._async_startup_app_list())
if not self._upnp_device and self._ssdp_rendering_control_location:
startup_tasks.append(self._async_startup_upnp())
if startup_tasks:
await asyncio.gather(*startup_tasks)
if not (service := self._get_upnp_service()):
return
get_volume, get_mute = await asyncio.gather(
service.action("GetVolume").async_call(InstanceID=0, Channel="Master"),
service.action("GetMute").async_call(InstanceID=0, Channel="Master"),
)
LOGGER.debug("Upnp GetVolume on %s: %s", self._host, get_volume)
if (volume_level := get_volume.get("CurrentVolume")) is not None:
self._attr_volume_level = volume_level / 100
LOGGER.debug("Upnp GetMute on %s: %s", self._host, get_mute)
if (is_muted := get_mute.get("CurrentMute")) is not None:
self._attr_is_volume_muted = is_muted
| c024033dae98f01380842dac35be743fbefa0a36 | 252 | https://github.com/home-assistant/core.git | 373 | async def async_update(self) -> None:
if self._auth_failed or self.hass.is_stopping:
return
if self._power_off_in_progress():
self._attr_state = STATE_OFF
else:
self._attr_state = (
STATE_ON if await self._bridge.async_is_on() else STATE_OFF
)
if self._attr_state != STATE_ON:
return
startup_tasks: list[Coroutine[Any, Any, None]] = []
if not self._app_list_event.is_set():
startup_tasks.append(self._async_startup_app_list())
if not self._upnp_device and self._ssdp_rendering_control_location:
startup_tasks.append(self._async_startup_upnp())
if startup_tasks:
await asyncio.gather(*startup_tasks)
if not (service := self._get_upnp_service()):
return
get_volume, get_mute = await asyncio.gather(
service.action("GetVolume").async_call(InstanceID=0, Channel="Master"),
service.action("GetMute").async_call(InstanceID=0, Channel="Master"),
)
LOGGER.debug( | 40 | 415 | async_update |
|
5 | 0 | 1 | 3 | pandas/io/excel/_xlwt.py | 164,696 | DEP: Protect some ExcelWriter attributes (#45795)
* DEP: Deprecate ExcelWriter attributes
* DEP: Deprecate ExcelWriter attributes
* Fixup for test
* Move tests and restore check_extension
* Deprecate xlwt fm_date and fm_datetime; doc improvements | pandas | 8 | Python | 5 | _xlwt.py | def fm_datetime(self):
self._deprecate("fm_datetime")
return self._fm_datetime
| 047137ce2619cfe2027e3999dfb92eb614d9a485 | 16 | https://github.com/pandas-dev/pandas.git | 26 | def fm_datetime(self):
self._deprecate("fm_datetime")
r | 4 | 31 | fm_datetime |
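A hedged sketch of the deprecation pattern behind `fm_datetime`; the `_deprecate` helper here is a hypothetical stand-in for pandas' internal one:

import warnings

class WriterLike:
    def __init__(self):
        self._fm_datetime = "YYYY-MM-DD HH:MM:SS"

    def _deprecate(self, name):
        # Hypothetical helper: warn on access that the attribute is
        # deprecated and slated for removal.
        warnings.warn(
            f"{name} is deprecated and will be removed in a future version.",
            FutureWarning,
            stacklevel=3,
        )

    @property
    def fm_datetime(self):
        self._deprecate("fm_datetime")
        return self._fm_datetime

w = WriterLike()
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _ = w.fm_datetime
print(caught[0].category.__name__)  # FutureWarning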
|
67 | 0 | 6 | 14 | keras/layers/preprocessing/index_lookup.py | 280,543 | Add Preprocessing Layer support in Keras v3 saving (IntegerLookup, StringLookup, TextVectorization).
PiperOrigin-RevId: 491682409 | keras | 14 | Python | 50 | index_lookup.py | def _load_assets(self, dir_path):
if self.input_vocabulary:
# Vocab saved in config.
# TODO: consider unifying both paths.
return
vocabulary_filepath = tf.io.gfile.join(dir_path, "vocabulary.txt")
# TODO: fix bug with include_special_tokens and set reload from file.
with open(vocabulary_filepath, "r") as f:
lines = f.read().split("\n")
if tf.as_dtype(self.vocabulary_dtype) == tf.string:
values = [str(line) for line in lines]
else:
values = [int(line) for line in lines]
if self.output_mode == TF_IDF:
self.set_vocabulary(values, idf_weights=False)
else:
self.set_vocabulary(values)
| dc1fe7f95b389e1bda9056ba53e739821fbe8e6e | 114 | https://github.com/keras-team/keras.git | 242 | def _load_assets(self, dir_path):
if self.input_vocabulary:
# Vocab saved in config.
# TODO: consider unifying both paths.
return
| 25 | 192 | _load_assets |
|
8 | 0 | 1 | 22 | tests/test_schema.py | 14,095 | Fix regression in handling of nested dataclasses in `get_flat_models_from_field` (#3819)
* add test for nested python dataclass schema generation
* fix handling of dataclasses in `get_flat_models_from_field`
* add change note | pydantic | 7 | Python | 8 | test_schema.py | def test_nested_python_dataclasses():
from dataclasses import dataclass as python_dataclass
| faee3301eb2c0d4157150a2f4cde2b4edb32ac8e | 118 | https://github.com/pydantic/pydantic.git | 14 | def test_nested_python_dataclasses():
from dataclasses import dataclass as python_dataclass
| 4 | 22 | test_nested_python_dataclasses |
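The regression being tested concerns schema generation when one stdlib dataclass nests inside another on a pydantic model. A minimal sketch of that shape (pydantic v1 API assumed; class names invented):

from dataclasses import dataclass

from pydantic import BaseModel

@dataclass
class Inner:
    value: int

@dataclass
class Outer:
    inner: Inner

class Model(BaseModel):
    outer: Outer

# Both dataclasses should appear as definitions in the generated schema;
# the bug in get_flat_models_from_field caused the nested one to go missing.
print(sorted(Model.schema()["definitions"]))  # expected: ['Inner', 'Outer']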
|
12 | 0 | 3 | 5 | IPython/conftest.py | 208,416 | make sure to run async tests
there are some `async def` tests, but they are skipped unless they carry the `asyncio` marker | ipython | 12 | Python | 12 | conftest.py | def pytest_collection_modifyitems(items):
for item in items:
if inspect.iscoroutinefunction(item.obj):
item.add_marker("asyncio")
assert not inspect.isasyncgenfunction(item.obj)
| 82d1a374575d9785708f144976cf139c76c7acb7 | 37 | https://github.com/ipython/ipython.git | 43 | def pytest_collection_modifyitems(items):
for item in items:
if inspect.iscoroutinefunction(item.obj):
item.add_marker("asyncio")
assert not inspect.isasync | 8 | 64 | pytest_collection_modifyitems |
|
110 | 0 | 1 | 13 | ivy/backends/numpy/core/image.py | 213,537 | renamed dev_str arg to dev for all methods. | ivy | 14 | Python | 48 | image.py | def gradient_image(x):
x_shape = _ivy.shape(x)
batch_shape = x_shape[:-3]
image_dims = x_shape[-3:-1]
dev = _ivy.dev(x)
# to list
batch_shape = list(batch_shape)
image_dims = list(image_dims)
num_dims = x_shape[-1]
# BS x H-1 x W x D
dy = x[..., 1:, :, :] - x[..., :-1, :, :]
# BS x H x W-1 x D
dx = x[..., :, 1:, :] - x[..., :, :-1, :]
# BS x H x W x D
dy = _ivy.concatenate((dy, _ivy.zeros(batch_shape + [1, image_dims[1], num_dims], dev=dev)), -3)
dx = _ivy.concatenate((dx, _ivy.zeros(batch_shape + [image_dims[0], 1, num_dims], dev=dev)), -2)
# BS x H x W x D, BS x H x W x D
return dy, dx
| d743336b1f3654cd0315f380f43eed4116997c1d | 184 | https://github.com/unifyai/ivy.git | 163 | def gradient_image(x):
x_shape = _ivy.shape(x)
batch_shape = x_shape[:-3]
image_dims = x_shape[-3:-1]
dev = _ivy.dev(x)
# to list
batch_sh | 14 | 280 | gradient_image |
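The same forward-difference scheme, written with plain NumPy to show the shape bookkeeping (a zero row/column is appended so both outputs keep the input's shape):

import numpy as np

def gradient_image_np(x):
    # x has shape (..., H, W, D); differences along H and W, zero-padded.
    dy = x[..., 1:, :, :] - x[..., :-1, :, :]
    dx = x[..., :, 1:, :] - x[..., :, :-1, :]
    dy = np.concatenate([dy, np.zeros_like(x[..., :1, :, :])], axis=-3)
    dx = np.concatenate([dx, np.zeros_like(x[..., :, :1, :])], axis=-2)
    return dy, dx

img = np.arange(12.0).reshape(1, 3, 4, 1)  # BS=1, H=3, W=4, D=1
dy, dx = gradient_image_np(img)
assert dy.shape == img.shape and dx.shape == img.shape
print(dy[0, :, 0, 0], dx[0, 0, :, 0])  # [4. 4. 0.] [1. 1. 1. 0.]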
|
147 | 0 | 7 | 44 | homeassistant/components/http/__init__.py | 297,869 | String formatting and max line length - Part 2 (#84393) | core | 16 | Python | 87 | __init__.py | def _create_ssl_context(self) -> ssl.SSLContext | None:
context: ssl.SSLContext | None = None
assert self.ssl_certificate is not None
try:
if self.ssl_profile == SSL_INTERMEDIATE:
context = ssl_util.server_context_intermediate()
else:
context = ssl_util.server_context_modern()
context.load_cert_chain(self.ssl_certificate, self.ssl_key)
except OSError as error:
if not self.hass.config.safe_mode:
raise HomeAssistantError(
f"Could not use SSL certificate from {self.ssl_certificate}:"
f" {error}"
) from error
_LOGGER.error(
"Could not read SSL certificate from %s: %s",
self.ssl_certificate,
error,
)
try:
context = self._create_emergency_ssl_context()
except OSError as error2:
_LOGGER.error(
"Could not create an emergency self signed ssl certificate: %s",
error2,
)
context = None
else:
_LOGGER.critical(
"Home Assistant is running in safe mode with an emergency self"
" signed ssl certificate because the configured SSL certificate was"
" not usable"
)
return context
if self.ssl_peer_certificate:
if context is None:
raise HomeAssistantError(
"Failed to create ssl context, no fallback available because a peer"
" certificate is required."
)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(self.ssl_peer_certificate)
return context
| cb13418babd21a1e9584978b0c523f1b1e4e1cb0 | 173 | https://github.com/home-assistant/core.git | 731 | def _create_ssl_context(self) -> ssl.SSLContext | None:
context: ssl.SSLContext | None = None
assert self.ssl_certificate is not None
try:
if self.ssl_profile == SSL_INTERMEDIATE:
context = ssl_util.server_context_intermediate()
else:
context = ssl_util.server_context_modern()
contex | 27 | 303 | _create_ssl_context |
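The peer-certificate branch above is standard `ssl` module mutual-TLS configuration. A short sketch with invented file names:

import ssl

# Server-side context: present our own certificate and require connecting
# clients to present one signed by a CA we trust (the CERT_REQUIRED setup
# used above for self.ssl_peer_certificate).
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
context.load_cert_chain("server.pem", "server.key")
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations("trusted_clients_ca.pem")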
|
9 | 0 | 1 | 11 | certbot-nginx/certbot_nginx/_internal/configurator.py | 186,574 | Fully type certbot-nginx module (#9124)
* Work in progress
* Fix type
* Work in progress
* Work in progress
* Work in progress
* Work in progress
* Work in progress
* Oops.
* Fix typing in UnspacedList
* Fix logic
* Finish typing
* List certbot-nginx as fully typed in tox
* Fix lint
* Fix checks
* Organize imports
* Fix typing for Python 3.6
* Fix checks
* Fix lint
* Update certbot-nginx/certbot_nginx/_internal/configurator.py
Co-authored-by: alexzorin <alex@zor.io>
* Update certbot-nginx/certbot_nginx/_internal/configurator.py
Co-authored-by: alexzorin <alex@zor.io>
* Fix signature of deploy_cert regarding the installer interface
* Update certbot-nginx/certbot_nginx/_internal/obj.py
Co-authored-by: alexzorin <alex@zor.io>
* Fix types
* Update certbot-nginx/certbot_nginx/_internal/parser.py
Co-authored-by: alexzorin <alex@zor.io>
* Precise type
* Precise _coerce possible inputs/outputs
* Fix type
* Update certbot-nginx/certbot_nginx/_internal/http_01.py
Co-authored-by: ohemorange <ebportnoy@gmail.com>
* Fix type
* Remove an undesirable implementation.
* Fix type
Co-authored-by: alexzorin <alex@zor.io>
Co-authored-by: ohemorange <ebportnoy@gmail.com> | certbot | 9 | Python | 9 | configurator.py | def recovery_routine(self) -> None:
super().recovery_routine()
self.new_vhost = None
self.parser.load()
| 16aad35d31a887dab157f9d4f5e0fe9218d06064 | 27 | https://github.com/certbot/certbot.git | 37 | def recovery_routine(self) -> None:
super().recovery_routine()
self.new_vho | 6 | 49 | recovery_routine |
|
26 | 1 | 1 | 7 | tests/indices/embedding/test_base.py | 225,796 | Add general embedding abstraction (#79)
Co-authored-by: Jerry Liu <jerry@robustintelligence.com> | llama_index | 8 | Python | 21 | test_base.py | def test_embedding_similarity() -> None:
embed_model = OpenAIEmbedding()
text_embedding = [3.0, 4.0, 0.0]
query_embedding = [0.0, 1.0, 0.0]
cosine = embed_model.similarity(query_embedding, text_embedding)
assert cosine == 0.8
@pytest.fixture | a48611ee12b6758d752b4ca2f3f640f94234522d | @pytest.fixture | 58 | https://github.com/jerryjliu/llama_index.git | 43 | def test_embedding_similarity() -> None:
embed_model = OpenAIEmbedding()
text_embedding = [3.0, 4.0, 0.0]
query_embedding = [0.0, 1.0, 0.0]
cosine = embed_model.similarity(query_embeddin | 9 | 76 | test_embedding_similarity |
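The expected value in the test is plain cosine-similarity arithmetic and can be checked by hand:

import math

query = [0.0, 1.0, 0.0]
text = [3.0, 4.0, 0.0]

dot = sum(q * t for q, t in zip(query, text))  # 4.0
norm_q = math.sqrt(sum(q * q for q in query))  # 1.0
norm_t = math.sqrt(sum(t * t for t in text))   # 5.0
print(dot / (norm_q * norm_t))                 # 0.8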
62 | 0 | 1 | 8 | sympy/integrals/rubi/tests/test_utility_function.py | 195,758 | Fixed stuff in rubi | sympy | 13 | Python | 31 | test_utility_function.py | def test_TrigReduce():
assert TrigReduce(cos(x)**2) == cos(2*x)/2 + S.Half
assert TrigReduce(cos(x)**2*sin(x)) == sin(x)/4 + sin(3*x)/4
assert TrigReduce(cos(x)**2+sin(x)) == sin(x) + cos(2*x)/2 + S.Half
assert TrigReduce(cos(x)**2*sin(x)**5) == 5*sin(x)/64 + sin(3*x)/64 - 3*sin(5*x)/64 + sin(7*x)/64
assert TrigReduce(2*sin(x)*cos(x) + 2*cos(x)**2) == sin(2*x) + cos(2*x) + 1
assert TrigReduce(sinh(a + b*x)**2) == cosh(2*a + 2*b*x)/2 - S.Half
assert TrigReduce(sinh(a + b*x)*cosh(a + b*x)) == sinh(2*a + 2*b*x)/2
| ed4b2f2458b02f18df0e4449be38269ef88b90f6 | 255 | https://github.com/sympy/sympy.git | 82 | def test_TrigReduce():
assert TrigReduce(cos(x)**2) == cos(2*x)/2 + S.Half
assert TrigReduce(cos(x)**2*sin(x | 11 | 401 | test_TrigReduce |
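The first assertion above is the power-reduction identity cos(x)**2 == (1 + cos(2*x))/2; SymPy can confirm these rewrites independently of the Rubi helper:

from sympy import Rational, cos, sin, symbols, trigsimp

x = symbols("x")

# Power reduction: cos(x)**2 == cos(2*x)/2 + 1/2
assert trigsimp(cos(x)**2 - (cos(2*x)/2 + Rational(1, 2))) == 0
# Double angle: 2*sin(x)*cos(x) == sin(2*x)
assert trigsimp(2*sin(x)*cos(x) - sin(2*x)) == 0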
|
51 | 0 | 2 | 7 | lib/streamlit/in_memory_file_manager.py | 118,558 | Rename and refactor `Report` machinery (#4141)
This refactor replaces (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app". | streamlit | 9 | Python | 45 | in_memory_file_manager.py | def _get_session_id() -> str:
ctx = get_script_run_ctx()
if ctx is None:
# This is only None when running "python myscript.py" rather than
# "streamlit run myscript.py". In which case the session ID doesn't
# matter and can just be a constant, as there's only ever "session".
return "dontcare"
else:
return ctx.session_id
| 704eab3478cf69847825b23dabf15813a8ac9fa2 | 25 | https://github.com/streamlit/streamlit.git | 98 | def _get_session_id() -> str:
ctx | 5 | 50 | _get_session_id |
|
37 | 0 | 4 | 9 | homeassistant/components/mqtt/climate.py | 300,243 | Use climate enums in mqtt (#70696) | core | 11 | Python | 33 | climate.py | async def async_set_swing_mode(self, swing_mode):
# CONF_SEND_IF_OFF is deprecated, support will be removed with release 2022.9
if self._send_if_off or self._current_operation != HVACMode.OFF:
payload = self._command_templates[CONF_SWING_MODE_COMMAND_TEMPLATE](
swing_mode
)
await self._publish(CONF_SWING_MODE_COMMAND_TOPIC, payload)
if self._topic[CONF_SWING_MODE_STATE_TOPIC] is None:
self._current_swing_mode = swing_mode
self.async_write_ha_state()
| 1be2438ef67c7f523654bdb849cbed5f4c865365 | 61 | https://github.com/home-assistant/core.git | 135 | async def async_set_swing_mode(self, swing_mode):
# CONF_SEND_IF_OFF is deprecated, support will be removed with release 2022.9
if self._send_if_off or self._current_operation != HVACMode.OFF:
payload = self._command_templates[CONF_SWING_MODE_COMMAND_TEMPLATE](
swing_mode
)
await self._publish(CONF_SWING_MODE_COMMAND_TOPIC, payload)
if self._topic[CONF_SWING_MODE_STATE_ | 16 | 100 | async_set_swing_mode |
|
41 | 0 | 1 | 17 | keras/benchmarks/model_components_benchmarks_test.py | 279,189 | Code reformatted | keras | 17 | Python | 31 | model_components_benchmarks_test.py | def _run(self, func, num_iters, execution_mode=None):
total_time = run_benchmark(func, num_iters, execution_mode)
mean_us = total_time * 1e6 / num_iters
self.report_benchmark(
iters=num_iters,
wall_time=mean_us,
metrics=[
{
"name": "exp_per_sec",
"value": float(f"{num_iters / total_time:.3f}"),
},
{
"name": "us_per_exp",
"value": float(f"{total_time * 1000000.0 / num_iters:.3f}"),
},
],
)
| afd86e95fc91b98dfb30eac27933b1e10b201b97 | 78 | https://github.com/keras-team/keras.git | 248 | def _run(self, func, num_iters, execution_mode=None):
total_time = run_benchmark(func, num_iters, execution_mode)
mean_us = total_time * 1e6 / num_iters
self.report_benchmark(
iters=num_iters,
wall_time=mean_us,
metrics=[
{
"name": "exp_per_sec",
"value": float(f"{num_iters / total_time:.3f}"),
},
{
"name": "us_per_exp",
"value": float(f"{total_time * 1000000.0 / num_iters:.3f}"),
},
],
)
| 13 | 144 | _run |
|
16 | 0 | 1 | 3 | plugins/dbms/maxdb/enumeration.py | 123,618 | Fixing DeprecationWarning (logger.warn) | sqlmap | 7 | Python | 16 | enumeration.py | def getHostname(self):
warnMsg = "on SAP MaxDB it is not possible to enumerate the hostname"
logger.warning(warnMsg)
| df4293473d2fb6e887e31522cab5aff95e201581 | 14 | https://github.com/sqlmapproject/sqlmap.git | 29 | def getHostname(self):
warnMsg = "on SAP MaxDB it is not possible to enumerate the hostname"
logger. | 5 | 26 | getHostname |
|
40 | 0 | 3 | 11 | sklearn/impute/tests/test_common.py | 261,582 | ENH keep features with all missing values during imputation (#24770)
Co-authored-by: Chiara Marmo <cmarmo@users.noreply.github.com>
Co-authored-by: Julien Jerphanion <git@jjerphan.xyz>
Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
Co-authored-by: Vitor SRG <vitorssrg@gmail.com>
Fixes https://github.com/scikit-learn/scikit-learn/pull/16695
Fixes https://github.com/scikit-learn/scikit-learn/issues/16426
Fixes https://github.com/scikit-learn/scikit-learn/issues/16977 | scikit-learn | 15 | Python | 34 | test_common.py | def test_keep_empty_features(imputer, keep_empty_features):
X = np.array([[np.nan, 1], [np.nan, 2], [np.nan, 3]])
imputer = imputer.set_params(
add_indicator=False, keep_empty_features=keep_empty_features
)
for method in ["fit_transform", "transform"]:
X_imputed = getattr(imputer, method)(X)
if keep_empty_features:
assert X_imputed.shape == X.shape
else:
assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
| d8fa96c29828e3ca79ddd5d7466521ac4d95213c | 109 | https://github.com/scikit-learn/scikit-learn.git | 105 | def test_keep_empty_features(imputer, keep_empty_features):
X = np.array([[np.nan, 1], [np.nan, 2], [np.nan, 3]])
imputer = imputer.set_params(
add_indicator=False, keep_empty_features=keep_empty_features
)
for method in ["fit_transform", "transform"]:
X_imputed = getattr(imputer, method)(X)
if keep_empty_features:
assert X_imputed.shape == X.shape
else:
assert X_imputed.shap | 13 | 165 | test_keep_empty_features |
|
143 | 0 | 2 | 35 | tests/test_plotting.py | 148,448 | Fix some tests after drawdown calculation change | freqtrade | 14 | Python | 93 | test_plotting.py | def test_generate_profit_graph(testdatadir):
filename = testdatadir / "backtest-result_test.json"
trades = load_backtest_data(filename)
timerange = TimeRange.parse_timerange("20180110-20180112")
pairs = ["TRX/BTC", "XLM/BTC"]
trades = trades[trades['close_date'] < pd.Timestamp('2018-01-12', tz='UTC')]
data = history.load_data(datadir=testdatadir,
pairs=pairs,
timeframe='5m',
timerange=timerange)
trades = trades[trades['pair'].isin(pairs)]
fig = generate_profit_graph(pairs, data, trades, timeframe="5m", stake_currency='BTC')
assert isinstance(fig, go.Figure)
assert fig.layout.title.text == "Freqtrade Profit plot"
assert fig.layout.yaxis.title.text == "Price"
assert fig.layout.yaxis2.title.text == "Profit BTC"
assert fig.layout.yaxis3.title.text == "Profit BTC"
figure = fig.layout.figure
assert len(figure.data) == 7
avgclose = find_trace_in_fig_data(figure.data, "Avg close price")
assert isinstance(avgclose, go.Scatter)
profit = find_trace_in_fig_data(figure.data, "Profit")
assert isinstance(profit, go.Scatter)
drawdown = find_trace_in_fig_data(figure.data, "Max drawdown 35.69%")
assert isinstance(drawdown, go.Scatter)
parallel = find_trace_in_fig_data(figure.data, "Parallel trades")
assert isinstance(parallel, go.Scatter)
underwater = find_trace_in_fig_data(figure.data, "Underwater Plot")
assert isinstance(underwater, go.Scatter)
for pair in pairs:
profit_pair = find_trace_in_fig_data(figure.data, f"Profit {pair}")
assert isinstance(profit_pair, go.Scatter)
with pytest.raises(OperationalException, match=r"No trades found.*"):
# Pair cannot be empty - so it's an empty dataframe.
generate_profit_graph(pairs, data, trades.loc[trades['pair'].isnull()], timeframe="5m",
stake_currency='BTC')
| 09fae25c9426cd03df2f2d031d16a6e3b1533a55 | 337 | https://github.com/freqtrade/freqtrade.git | 364 | def test_generate_profit_graph(testdatadir):
filename = testdatadir / "backtest-result_test.json"
trades = load_backtest_data(filename)
timerange = TimeRange.parse_timerange("20180110-20180112")
pairs = ["TRX/BTC", "XLM/BTC"]
trades = trades[trades['close_date'] < pd.Timestamp('2018-01-12', tz='UTC')]
data = history.load_data(datadir=testdatadir,
pairs=pairs,
timeframe='5m',
timerange=timerange)
trades = trades[trades['pair'].isin(pairs)]
fig = generate_profit_graph(pairs, data, trades, timef | 47 | 551 | test_generate_profit_graph |
|
41 | 0 | 3 | 25 | nltk/tokenize/util.py | 42,550 | Docstring tests (#3050)
* fixed pytests
* fixed more pytests
* fixed more pytests and resolved multiline pytest issues for snowball.py and causal.py
* fixed pytests (mainly multiline or rounding issues)
* fixed treebank pytests, removed test for return_string=True (deprecated)
* fixed destructive.py pytests, removed test for return_string=True (deprecated)
* fixed pytest (rounding issues)
* fixed pytest (initialised missing object)
* fixed pytest (formatting issues)
* fixed pytest (formatting issues)
* fixed pytest (formatting issues)
* added pytest +SKIP for deprecated module stanford
* updated AUTHORS.md
* reworked docstring corrections to use ELLIPSIS and different roundings
* fixed AUTHORS.md to be consistent
* Fix framenet doctest formatting with pprint
* Change docstring on MultiListBox.__init__
I believe the original typo was misinterpreted and changed to something that was not originally intended.
Co-authored-by: Jan Lennartz <jan.lennartz@ing.com>
Co-authored-by: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com>
Co-authored-by: Tom Aarsen <Cubiegamedev@gmail.com> | nltk | 11 | Python | 34 | util.py | def regexp_span_tokenize(s, regexp):
    r"""Good muffins cost $3.88\nin New York. Please buy me
    ... two of them.\n\nThanks."""
left = 0
for m in finditer(regexp, s):
right, next = m.span()
if right != left:
yield left, right
left = next
yield left, len(s)
| 8a4cf5d94eb94b6427c5d1d7907ba07b119932c5 | 50 | https://github.com/nltk/nltk.git | 92 | def regexp_span_tokenize(s, regexp):
    r"""Good muffins cost $3.88\nin New York. Please buy me
    ... two of them.\n\nThanks."""
| 10 | 81 | regexp_span_tokenize |
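Usage of the generator above; each yielded pair is a `(start, end)` span delimited by successive matches of the regexp:

from nltk.tokenize.util import regexp_span_tokenize

s = "Good muffins"
print(list(regexp_span_tokenize(s, r"\s")))                 # [(0, 4), (5, 12)]
print([s[a:b] for a, b in regexp_span_tokenize(s, r"\s")])  # ['Good', 'muffins']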
|
20 | 0 | 1 | 9 | wagtail/images/tests/test_admin_views.py | 77,612 | Allow images to be sorted by date, title or file size | wagtail | 12 | Python | 18 | test_admin_views.py | def test_default_ordering_used_if_invalid_ordering_provided(self):
response = self.get({"ordering": "bogus"})
self.assertEqual(response.status_code, 200)
context = response.context
default_ordering = "-created_at"
self.assertEqual(context["current_ordering"], default_ordering)
self.assertEqual(
context["images"].object_list.query.order_by, (default_ordering,)
)
| b1614930eb74e3bdab28c5f04949347f43ae6fa7 | 66 | https://github.com/wagtail/wagtail.git | 79 | def test_default_ordering_used_if_invalid_ordering_provided(self):
response = self.get({"ordering": "bogus"})
self.assertEqual(response.status_code, 200)
context = | 11 | 111 | test_default_ordering_used_if_invalid_ordering_provided |
|
11 | 0 | 2 | 4 | airflow/providers/elasticsearch/log/es_task_handler.py | 46,460 | Update black precommit (#22521)
Use latest version of black, drop py36, and add py310. | airflow | 13 | Python | 11 | es_task_handler.py | def emit(self, record):
if self.handler:
record.offset = int(time() * (10**9))
self.handler.emit(record)
| c063fc688cf20c37ed830de5e3dac4a664fd8241 | 36 | https://github.com/apache/airflow.git | 39 | def emit(self, record):
if self.handler:
record.of | 7 | 58 | emit |
|
30 | 0 | 1 | 8 | mmdet/models/dense_heads/maskformer_head.py | 244,041 | [Feature] Add Maskformer to mmdet (#7212)
* first commit
* add README
* move model description from config to readme
* add description for binary_input
* add description for dice loss
* add an independent panoptic gt processing function
* remove pretrain compatibility in maskformer
* update comments in maskformer_head
* update docs format | mmdetection | 9 | Python | 20 | maskformer_head.py | def preprocess_gt(self, gt_labels_list, gt_masks_list, gt_semantic_segs):
num_things_list = [self.num_things_classes] * len(gt_labels_list)
num_stuff_list = [self.num_stuff_classes] * len(gt_labels_list)
targets = multi_apply(preprocess_panoptic_gt, gt_labels_list,
gt_masks_list, gt_semantic_segs, num_things_list,
num_stuff_list)
labels, masks = targets
return labels, masks
| cac356380d505bf15587f07c0529218cc36b9652 | 61 | https://github.com/open-mmlab/mmdetection.git | 130 | def preprocess_gt(self, gt_labels_list, gt_masks_list, gt_semantic_segs):
num_things_list = [self.num_things_classes] * len(gt_labels_list)
num_stuff_list = [self.num_stuff_classes] * len(gt_labels_list)
targets = multi_apply(preprocess_panoptic_gt, gt_labels_list,
gt_masks_list, gt_semantic_segs, num_things_list,
num_stuff_list)
labels, masks = targets
return labels, mas | 15 | 94 | preprocess_gt |
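`multi_apply` above is mmdet's map-and-transpose helper; a simplified sketch of its behavior:

def multi_apply(func, *args, **kwargs):
    # Apply func to each zipped sample, then transpose the list of
    # per-sample result tuples into a tuple of per-field lists.
    results = [func(*sample, **kwargs) for sample in zip(*args)]
    return tuple(map(list, zip(*results)))

def square_and_cube(n):
    return n * n, n * n * n

squares, cubes = multi_apply(square_and_cube, [1, 2, 3])
print(squares, cubes)  # [1, 4, 9] [1, 8, 27]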