Columns (dtype, min, max):

column          dtype          min      max
--------------  -------------  -------  ------
n_words         int64          3        1.95k
n_ast_errors    int64          0        2
complexity      int64          1        151
nloc            int64          2        546
path            stringlengths  8        125
id              int64          280      339k
commit_message  stringlengths  3        18.1k
repo            stringlengths  3        28
ast_levels      int64          4        28
language        stringclasses  1 value
vocab_size      int64          3        677
file_name       stringlengths  5        67
code            stringlengths  101      24k
commit_id       stringlengths  40       40
ast_errors      stringlengths  0        2.76k
token_counts    int64          7        3.77k
url             stringlengths  31       61
n_whitespaces   int64          4        13.9k
random_cut      stringlengths  21       13.9k
n_identifiers   int64          1        157
n_ast_nodes     int64          10       3.6k
fun_name        stringlengths  3        72
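The rows that follow give one record per sample, listing the field values in the column order above; in the rows shown here, an empty `ast_errors` value is simply skipped rather than printed as a blank line. As a minimal sketch of how a row maps back onto this schema, the first record (the django `to_list` sample) can be written out as a Python dict; the `rows` list and the nloc/complexity filter at the end are illustrative assumptions showing how such records might be queried, not something the dump itself defines.

```python
# Minimal sketch: the first record below, keyed by the schema columns above.
# All values are copied from that row; only `rows` and the filter thresholds
# are illustrative assumptions.
record = {
    "n_words": 11,
    "n_ast_errors": 0,
    "complexity": 2,
    "nloc": 4,
    "path": "django/test/testcases.py",
    "id": 202_973,
    "commit_message": (
        "Refs #33348 -- Deprecated passing errors=None to "
        "SimpleTestCase.assertFormError()/assertFormsetErrors()."
    ),
    "repo": "django",
    "ast_levels": 9,
    "language": "Python",
    "vocab_size": 10,
    "file_name": "testcases.py",
    "code": (
        "def to_list(value):\n"
        "    if not isinstance(value, list):\n"
        "        value = [value]\n"
        "    return value\n"
    ),
    "commit_id": "c67e1cf44f17c36139e25b1eae92216cb8baad77",
    "ast_errors": "",  # empty in this row, hence omitted from the dump below
    "token_counts": 22,
    "url": "https://github.com/django/django.git",
    "n_whitespaces": 27,
    "random_cut": (
        "def to_list(value):\n"
        "    if not isinstance(value, list):\n"
        "        value = [value]\n"
    ),
    "n_identifiers": 4,
    "n_ast_nodes": 38,
    "fun_name": "to_list",
}

# Toy query over a list of such records: keep short, low-complexity functions.
rows = [record]  # in practice, every record parsed from the dump
small_and_simple = [r for r in rows if r["nloc"] <= 10 and r["complexity"] <= 2]
print([r["fun_name"] for r in small_and_simple])  # ['to_list']
```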
11
0
2
4
django/test/testcases.py
202,973
Refs #33348 -- Deprecated passing errors=None to SimpleTestCase.assertFormError()/assertFormsetErrors().
django
9
Python
10
testcases.py
def to_list(value): if not isinstance(value, list): value = [value] return value
c67e1cf44f17c36139e25b1eae92216cb8baad77
22
https://github.com/django/django.git
27
def to_list(value): if not isinstance(value, list): value = [value]
4
38
to_list
63
0
6
21
homeassistant/components/rachio/webhooks.py
309,509
Import webhook (#64102) * Import webhook * Adjust webhook trigger * Fix pylint * Add type hints to async_handle_webhook * Revert changes to netatmo Co-authored-by: epenet <epenet@users.noreply.github.com>
core
15
Python
36
webhooks.py
async def async_get_or_create_registered_webhook_id_and_url(hass, entry): config = entry.data.copy() updated_config = False webhook_url = None if not (webhook_id := config.get(CONF_WEBHOOK_ID)): webhook_id = webhook.async_generate_id() config[CONF_WEBHOOK_ID] = webhook_id updated_config = True if hass.components.cloud.async_active_subscription(): if not (cloudhook_url := config.get(CONF_CLOUDHOOK_URL)): cloudhook_url = await hass.components.cloud.async_create_cloudhook( webhook_id ) config[CONF_CLOUDHOOK_URL] = cloudhook_url updated_config = True webhook_url = cloudhook_url if not webhook_url: webhook_url = webhook.async_generate_url(hass, webhook_id) if updated_config: hass.config_entries.async_update_entry(entry, data=config) return webhook_id, webhook_url
44a686931e0cdfd874539f27276aae849243229c
134
https://github.com/home-assistant/core.git
198
async def async_get_or_create_registered_webhook_id_and_url(hass, entry): config = entry.data.copy() updated_config = False webhook_url = None if not (webhook_id := config.get(CONF_WEBHOOK_ID)): webhook_id = webhook.async_generate_id() config[CONF_WEBHOOK_ID] = webhook_id updated_config = True if hass.components.cloud.async_active_subscription(): if not (cloudhook_url := config.get(CONF_CLOUDHOOK_URL)): cloudhook_url = await hass.components.cloud.async_create_cloudhook( webhook_id ) config[CONF_CLOUDHOOK_URL] = cloudhook_url upda
22
218
async_get_or_create_registered_webhook_id_and_url
59
0
2
17
tests/test_number_line.py
189,901
Add vectorized plotting functionality to improve performance (#2739) * enhancement(ParametricFunction, CoordinateSystem, Axes, NumberLine): vectorized coords_to_points,plot,plot_parametric_function; added Numberline:number_to_point_array * test(plot_log_x_axis_vectorized): added test for vectorized plotting * extend(angle_of_vector): added test for angle_of_vector with ndarray as input * fix(frames_comparison): fix naming of test data to be able to write and read the file name because ':' is forbidden * test(plot): add more vectorized tests, added use_vectorized fixture to make life simpler * fix(coordinate_system,number_line,scale): vectorizing functions and fixing bugs * enhancement(NumberLine): vectorized number_to_point and added test * enhancement(NumberLine): added tests for point_to_number, added example to doc * enhancement(CoordinateSystem): added test for coords_to_point_vectorized and vectorized coords_to_point * enhancement(Axes): vectorized point_to_coords and added tests * Minor formatting fixes and doctests * fixed flake hint with generator expression * Create __init__.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update documentation for antiderivative * Update manim/mobject/graphing/coordinate_systems.py Co-authored-by: Raghav Goel <raghavgd2h@gmail.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update manim/mobject/graphing/coordinate_systems.py Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at> * Update manim/mobject/graphing/coordinate_systems.py Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at> * Update manim/mobject/graphing/functions.py Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at> * Update manim/mobject/graphing/coordinate_systems.py Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at> * Update manim/mobject/graphing/coordinate_systems.py Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at> * fixed wrong indentation * stop doctest from leaking change in global config * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * change code block type * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Raghav Goel <raghavgd2h@gmail.com> Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at>
manim
9
Python
39
test_number_line.py
def test_point_to_number(): line = NumberLine() points = [ [1.0, 0.0, 0.0], [2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [4.0, 0.0, 0.0], [5.0, 0.0, 0.0], ] points_np = np.array(points) expected = [1, 2, 3, 4, 5] num_1 = [line.point_to_number(point) for point in points] num_2 = line.point_to_number(points) num_3 = line.point_to_number(points_np) np.testing.assert_array_equal(np.round(num_1, 4), np.round(expected, 4)) np.testing.assert_array_equal(np.round(num_2, 4), np.round(expected, 4)) np.testing.assert_array_equal(np.round(num_3, 4), np.round(expected, 4))
9359331effffcdf6e6c63718002c8fd576cc8c77
206
https://github.com/ManimCommunity/manim.git
126
def test_point_to_number(): line = NumberLine() points = [ [1.0, 0.0, 0.0], [2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [4.0, 0.0, 0.0], [5.0, 0.0, 0.0], ] points_np = np.array(points) expected = [1, 2, 3, 4, 5] num_1 = [line.point_to_number(point) for point in points] num_2 = line.point_to_number(points) num_3 = line.point_to_number(points_np) np.testing.assert_array_equal(np.round(num_1, 4), np.round(expected, 4)) np.testing.assert_array_equal(np.round(num_2, 4), np.round(expect
16
250
test_point_to_number
12
0
1
3
numpy/lib/tests/test_loadtxt.py
159,948
TST: Some tests for control character collisions. Adds some tests for the behavior of control characters, e.g. comments, delimiter and quotechar, when they have the same value. At this stage, these tests are more to frame the discussion about what the behavior should be, not to test what it currently is. I personally think raising an exception is correct for most of these situations, though it's worth noting that np.loadtxt currently doesn't for most of these corner cases (and seems to randomly assign precedence to delimiter over comments or vice versa depending on the values).
numpy
12
Python
12
test_loadtxt.py
def test_delimiter_quotechar_collision_raises(): with pytest.raises(TypeError, match="control characters.*are identical"): np.loadtxt(StringIO("1, 2, 3"), delimiter=",", quotechar=",")
b335431699f86ab523dc6dba2c91efc799f4372b
33
https://github.com/numpy/numpy.git
21
def test_delimiter_quotechar_collision_raises(): wi
10
61
test_delimiter_quotechar_collision_raises
222
0
6
87
discordbot/stocks/dark_pool_shorts/psi.py
281,167
Bot logging fix (#1105) * Write bot logs to stdout instead of a file Heroku's logging uses the stdout and has problems with files * Send "you snooze you lose" only if debug flag is enabled * Replace print statements with logger entries in the economy menu * Add logging to bot menu command calls * Silence bandit warnings about the REPLACE_ME token * Organize imports and update logging in economy menu * Organize imports and update logging in dps menu * Organize imports and update logging in dd menu * Organize imports and update logging in gov menu * Organize imports and update logging in options menu * Organize imports and update logging in screener menu * Organize imports and update logging in ta menu * Revert automatic import sorting * Add logging to the options reaction helper
OpenBBTerminal
14
Python
148
psi.py
async def psi_command(ctx, ticker=""): try: # Debug user input if cfg.DEBUG: logger.debug("!stocks.dps.psi %s", ticker) # Check for argument if ticker == "": raise Exception("Stock ticker is required") ticker = ticker.upper() stock = yf.download(ticker, progress=False) if stock.empty: raise Exception("Stock ticker is invalid") # Retrieve data df, prices = stockgrid_model.get_short_interest_volume(ticker) # Debug user output if cfg.DEBUG: logger.debug(df.to_string()) # Output data title = f"Stocks: [Stockgrid] Price vs Short Interest Volume {ticker}" embed = discord.Embed(title=title, colour=cfg.COLOR) embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) _, axes = plt.subplots( 2, 1, dpi=PLOT_DPI, gridspec_kw={"height_ratios": [2, 1]}, ) axes[0].bar( df["date"], df["total_volume"] / 1_000_000, width=timedelta(days=1), color="b", alpha=0.4, label="Total Volume", ) axes[0].bar( df["date"], df["short_volume"] / 1_000_000, width=timedelta(days=1), color="r", alpha=0.4, label="Short Volume", ) axes[0].set_ylabel("Volume (1M)") ax2 = axes[0].twinx() ax2.plot( df["date"].values, prices[len(prices) - len(df) :], # noqa: E203 c="k", label="Price", ) ax2.set_ylabel("Price ($)") lines, labels = axes[0].get_legend_handles_labels() lines2, labels2 = ax2.get_legend_handles_labels() ax2.legend(lines + lines2, labels + labels2, loc="upper left") axes[0].grid() axes[0].ticklabel_format(style="plain", axis="y") plt.title(f"Price vs Short Volume Interest for {ticker}") plt.gcf().autofmt_xdate() axes[1].plot( df["date"].values, 100 * df["short_volume%"], c="green", label="Short Vol. %", ) axes[1].set_ylabel("Short Vol. %") axes[1].grid(axis="y") lines, labels = axes[1].get_legend_handles_labels() axes[1].legend(lines, labels, loc="upper left") axes[1].set_ylim([0, 100]) file_name = ticker + "_psi.png" plt.savefig(file_name) plt.close("all") uploaded_image = gst_imgur.upload_image(file_name, title="something") image_link = uploaded_image.link embed.set_image(url=image_link) os.remove(file_name) await ctx.send(embed=embed) except Exception as e: embed = discord.Embed( title=f"ERROR Stocks: [Stockgrid] Price vs Short Interest Volume {ticker}", colour=cfg.COLOR, description=e, ) embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) await ctx.send(embed=embed)
f40ba0d256a78ab2b8461f0df3a9a52ca7dc5704
576
https://github.com/OpenBB-finance/OpenBBTerminal.git
995
async def psi_command(ctx, ticker=""): try: # Debug user input if cfg.DEBUG: logger.debug("!stocks.dps.psi %s", ticker) # Check for argument if ticker == "": raise Exception("Stock ticker is required") ticker = ticker.upper() stock = yf.download(ticker, progress=False) if stock.empty: raise Exception("Stock ticker is invalid") # Retrieve data df, prices = stockgrid_model.get_short_interest_volume(ticker) # Debug user output if cfg.DEBUG: logger.debug(df.to_string()) # Output data title = f"Stocks: [Stockgrid] Price vs Short Interest Volume {ticker}" embed = discord.Embed(title=title, colour=cfg.COLOR) embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) _, axes = plt.subplots( 2, 1, dpi=PLOT_DPI, gridspec_kw={"height_ratios": [2, 1]}, ) axes[0].bar( df["date"], df["total_volume"] / 1_000_000, width=timedelta(days=1), color="b", alpha=0.4, label="Total Volume", ) axes[0].bar( df["date"], df["short_volume"] / 1_000_000, width=timedelta(days=1), color="r", alpha=0.4, label="Short Volume", ) axes[0].set_ylabel("Volume (1M)") ax2 = axes[0].twinx() ax2.plot( df["date"].values, prices[len(prices) - len(df) :], # noqa: E203 c="k", label="Price", ) ax2.set_ylabel("Price ($)") lines, labels = axes[0].get_legend_handles_labels() lines2, labels2 = ax2.get_legend_handles_labels()
80
949
psi_command
25
0
1
6
onnx/test/shape_inference_test.py
255,801
Use Python type annotations rather than comments (#3962) * These have been supported since Python 3.5. ONNX doesn't support Python < 3.6, so we can use the annotations. Diffs generated by https://pypi.org/project/com2ann/. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Remove MYPY conditional logic in gen_proto.py It breaks the type annotations and shouldn't be needed. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Get rid of MYPY bool from more scripts Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * move Descriptors class above where its referenced in type annotation Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fixes Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * remove extra blank line Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotations Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotation in gen_docs Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix Operators.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix TestCoverage.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix protoc-gen-mypy.py Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
onnx
13
Python
22
shape_inference_test.py
def test_nonzero_existing_dim_param(self) -> None: graph = self._make_graph( [('x', TensorProto.FLOAT, (3,))], [make_node('NonZero', ['x'], ['y'])], [make_tensor_value_info('y', TensorProto.INT64, (None, 'NZ'))]) self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (1, 'NZ'))]) # type: ignore
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
83
https://github.com/onnx/onnx.git
72
def test_nonzero_existing_dim_param(self) -> None: graph = self._make_graph( [('x', TensorProto.FLOAT, (3,))], [make_node('NonZero', ['x'], ['y'])], [make_tensor_value_info('y', TensorProto.INT64, (None, 'NZ'))])
10
133
test_nonzero_existing_dim_param
25
0
1
3
rllib/examples/connectors/adapt_connector_policy.py
137,817
[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369)
ray
10
Python
24
adapt_connector_policy.py
def reset(self, *, seed=None, options=None): obs, info = self._env.reset() return np.hstack((obs, [8.0, 6.0])), info # Custom agent connector to drop the last 2 feature values.
8e680c483ce326cefc62e44f68ab1a6948b1c3d2
47
https://github.com/ray-project/ray.git
37
def reset(self, *, seed=None, options=None): obs, info = self._env.reset() return np.hstack((obs, [8
9
65
reset
29
0
3
10
deploy/pptracking/python/mot/tracker/ocsort_tracker.py
211,026
[MOT] Add OC_SORT tracker (#6272) * add ocsort tracker * add ocsort deploy * merge develop * fix ocsort tracker codes * fix doc, test=document_fix * fix doc, test=document_fix
PaddleDetection
12
Python
25
ocsort_tracker.py
def predict(self): if ((self.kf.x[6] + self.kf.x[2]) <= 0): self.kf.x[6] *= 0.0 self.kf.predict() self.age += 1 if (self.time_since_update > 0): self.hit_streak = 0 self.time_since_update += 1 self.history.append(convert_x_to_bbox(self.kf.x, score=self.score)) return self.history[-1]
c84153a355d9855fe55cf51d203b8b24e7d884e5
104
https://github.com/PaddlePaddle/PaddleDetection.git
107
def predict(self): if ((self.kf.x[6] + self.kf.x[2]) <= 0): self.kf.x[6] *= 0.0 self.kf.predict() self.age += 1 if (self.time_since_update > 0): self.hit_streak = 0 self.time_since_update += 1
11
159
predict
20
1
1
8
tests/openbb_terminal/stocks/dark_pool_shorts/test_stockgrid_view.py
285,338
Here we merge all API Refactor related branches (#2236) * Update api.py * Updated forex menu * refactor ycrv command * refactor ycrv command black * refactor ecocal command * Minh changes * Adding space to test pushing * title fix ecocal df * get economic calendar annotation * fix investingcom tests * refactor index command * refactor overview command * give defaults to wsj view function args * rename date args investincom * refacto bigmac command * fix ecocal typo * refactor rtps command * alphavantage gdp * alphavantage gdp per capita * alphavantage cpi * alphavantage tyld * alphavantage inf * refactor macro command * refactor macro command w helpers * refactor treasury command * fix macro on terminal * treasury labels * refactor maturities * update treasury maturities doc strings * refactor get economic calendar finhub * refactor map command api * display map filter choices * route economy api to performance map * route economy api to performance map * display group choices on valuation command * refactor performance and valuation commands * refactor spectrum model and view * add choices to spectrum controller * delete image after view * fix model tests finviz * fix finciz view tests * refactor futures * fix some tests * fix more tests * fix controller test * refactor fred series notes * update fred notes docstring * refacto fred series ids * fix pred and qa when empty datasets * refactor fred * uncomment stuff * refacto get series data * fix some tests * set defaults on args * refactor fred yield curve * black * fix spell and remove ecocal names * fix linting * linting * pylint fix * change dangerous defaults * Working through crypto fixes (#2256) * Working through crypto fixes * Continued adding crypto stuff * Added crypto overview * Added test fixes * Added fixtures * Fixed tests * Fixed charting issue * Removed broken APIs * Final adjustments * Added test fixes * map get groups and get ycrv countries into old api * exposed econdb helper funcs * remove helpers * refactor search indices * linting * refactor arg currency * pylint from currency * Started switching crpyto ascending to ascend * Merging * Portfolio model arguements, params, and docstring * Refactored for etf commands (#2292) * Refactored for etf commands * Fixed tests * Added load command * Fixed menu * Portfolio logic fixes * Added econometrics (#2260) * Added econometrics * Fixed tests * Simplified API * Added test fixes * Added test csv * Allowed examples to be loaded * Fund refactor (#2291) * Fund refactor * Changed fund_name and fund to name * Changed ascending to ascend * Stock menu refactoring for easier API usage (#2194) * Stocks refactoring for easier API usage * Linting * Refactor newly added features * Linting * Fixing tests * Refactor common files used by stocks menu * Fixing flake8 * Fix linting and tests * Linting * Fix flake8 * refactor insider_data * refactor mentions * refactor watchlist * refactor sentiment * refactor sentiment * fix yahoofinance tests * refactor load and candle * refactor get_news and display_news * refactor stocks.ins.act * candle default matplotlib * fix yahoofinance_view tests * fix ark model tests * fix ark view tests * fix business insider model * fix business insider view * refactor csimarket model * fix tests csi market model * update dd controller * fix get suppliers tests * fix dd controller tests * fix finhub tests * fix finviz tests * fix fmp tests * fix marketwatch tests * corrected argument keywords in test_bt_model * corrected argument keywords in test_bt_view * refactor fa 
controller * refactor marketwatch view * refactor gov controller * fix tests fa av * fix tests elect * fix dcf tests * fix polygon tests * fix fmp tests * fix quiverquant tests * fix yahoofinance fa tests * fix more fa tests * fix insider tests * fix more tests * fix more tests * fix options tests * fix stock gov tests * fix tests test_ba_controller * fix tests for test_finviz_compare_model.py * fixed 2 tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fix final tests * fixed tests * fixed tests * Fix tests * black * forgot to black tests * fixed tests * fixed tests * fixed tests * fixed tests * flakefix * Tests + code : Stocks / Discovery * fix tests * added recorder * fixed tests * fixed tests * black * black * remove unused imports * refactor display raw * sia dicts fix * pylint * linting * remove dangerous default * fix tests * fix beta model test * black * skip screener qa test * change sector path to sectors * update tests readme * fix metric defaults * black * substitute lost ticker * defaults cpic * another round on sia * refactor cramer * reduce default tweets on sentiment * refactor yf hist, corr, volume * arkorders default * refactor income, balance, cashflow * refacto scorr, screener, getfinnhub * refactor stockgrid * ibkr refactor * another round on stockgrid * add dividens end point * refactor discovery endpoints * update docstrings with similar input * refactor messages * refactor ba * refactor regioons * refactor twitter sentiment * refactor hist * refactor regions * give default to timeframe * refactor bunch of defaults and arg names * remove leftover imports * refactor vwap * let tests run * fix tests * fix stock tests * fix stockanalysis tests * flake * MYPY * Made important changes * added fixes * Fixed big issue * Added fixes to tests * fix qa tests * fix tests * fix 1 more test * last stocks failing * fix crypto test Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: colin99d <colin99delahunty@gmail.com> * fix portfolio tests * change period to window * update ca docstrings * refactor get_similar_companies func * Fixed * Update CI * Update CI 2 * Update CI 3 * Update dependencies Co-authored-by: colin99d <colin99delahunty@gmail.com> Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: James Simmons <simmonsj330@gmail.com> Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> Co-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt>
OpenBBTerminal
9
Python
19
test_stockgrid_view.py
def test_short_interest_volume(mocker, raw): # MOCK VISUALIZE_OUTPUT mocker.patch(target="openbb_terminal.helper_classes.TerminalStyle.visualize_output") stockgrid_view.short_interest_volume( symbol="PM", limit=2, raw=raw, export="", ) @pytest.mark.vcr @pytest.mark.record_stdout @pytest.mark.parametrize( "raw", [True, False], )
9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b
@pytest.mark.vcr @pytest.mark.record_stdout @pytest.mark.parametrize( "raw", [True, False], )
36
https://github.com/OpenBB-finance/OpenBBTerminal.git
61
def test_short_interest_volume(mocker, raw): # MOCK VISUALIZE_OUTPUT mocker.patch(target="openbb_terminal.helper_classes.Term
15
103
test_short_interest_volume
9
0
1
4
modin/experimental/pandas/test/test_io_exp.py
153,731
FIX-#4461: Fix S3 CSV data path (#4462) Signed-off-by: jeffreykennethli <jkli@ponder.io>
modin
12
Python
8
test_io_exp.py
def test_read_csv_without_glob(self): with pytest.warns(UserWarning, match=r"Shell-style wildcard"): with pytest.raises(FileNotFoundError): pd.read_csv_glob("s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-")
cf0eb393daa41abdd8cdf32b52ceee938cdcbe1a
32
https://github.com/modin-project/modin.git
41
def test_read_csv_without_glob(self): with pytest.warns(UserWarning, match=r"Shell-style wildcard"): with py
10
58
test_read_csv_without_glob
7
0
1
3
ludwig/models/ecd.py
6,009
Added end-to-end Torchscript compilation for tabular types (#1693)
ludwig
8
Python
7
ecd.py
def save_torchscript(self, save_path): traced = self.to_torchscript() traced.save(save_path)
68b0f1c9d4aaafabe973c38334b400766ead5348
20
https://github.com/ludwig-ai/ludwig.git
20
def save_torchscript(self, save_path):
6
33
save_torchscript
19
0
1
6
python/ray/tune/tests/test_convergence.py
143,148
[tune/structure] Refactor `suggest` into `search` package (#26074) This PR renames the `suggest` package to `search` and alters the layout slightly. In the new package, the higher-level abstractions are on the top level and the search algorithms have their own subdirectories. In a future refactor, we can turn algorithms such as PBT into actual `SearchAlgorithm` classes and move them into the `search` package. The main reason to keep algorithms and searchers in the same directory is to avoid user confusion - for a user, `Bayesopt` is as much a search algorithm as e.g. `PBT`, so it doesn't make sense to split them up.
ray
9
Python
18
test_convergence.py
def testConvergenceHyperopt(self): from ray.tune.search.hyperopt import HyperOptSearch np.random.seed(0) searcher = HyperOptSearch(random_state_seed=1234) analysis = self._testConvergence(searcher, patience=50, top=5) assert math.isclose(analysis.best_config["x"], 0, abs_tol=1e-2)
75d08b06328d213656e7280639b35ccecdfc34d0
67
https://github.com/ray-project/ray.git
53
def testConvergenceHyperopt(self): from ray.tune.search.hyperopt import HyperOptSearch np.random.seed(0) searcher = HyperOptSearch(ran
20
98
testConvergenceHyperopt
312
0
1
69
python/ray/data/tests/preprocessors/test_encoder.py
136,948
[Datasets] Split `test_preprocessors.py` into separate modules (#30633) test_preprocessors.py has gotten pretty long. So, I've split it up into modules like test_scaler.py. Signed-off-by: Balaji Veeramani <balaji@anyscale.com> Signed-off-by: Balaji Veeramani <bveeramani@berkeley.edu> Co-authored-by: Antoni Baum <antoni.baum@protonmail.com>
ray
11
Python
155
test_encoder.py
def test_multi_hot_encoder(): col_a = ["red", "green", "blue", "red"] col_b = ["warm", "cold", "hot", "cold"] col_c = [1, 10, 5, 10] col_d = [["warm"], [], ["hot", "warm", "cold"], ["cold", "cold"]] in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c, "D": col_d}) ds = ray.data.from_pandas(in_df) encoder = MultiHotEncoder(["B", "C", "D"]) # Transform with unfitted preprocessor. with pytest.raises(PreprocessorNotFittedException): encoder.transform(ds) # Fit data. encoder.fit(ds) assert encoder.stats_ == { "unique_values(B)": {"cold": 0, "hot": 1, "warm": 2}, "unique_values(C)": {1: 0, 5: 1, 10: 2}, "unique_values(D)": {"cold": 0, "hot": 1, "warm": 2}, } # Transform data. transformed = encoder.transform(ds) out_df = transformed.to_pandas() processed_col_a = col_a processed_col_b = [[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]] processed_col_c = [[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]] processed_col_d = [[0, 0, 1], [0, 0, 0], [1, 1, 1], [2, 0, 0]] expected_df = pd.DataFrame.from_dict( { "A": processed_col_a, "B": processed_col_b, "C": processed_col_c, "D": processed_col_d, } ) assert out_df.equals(expected_df) # Transform batch. pred_col_a = ["blue", "yellow", None] pred_col_b = ["cold", "warm", "other"] pred_col_c = [10, 1, 20] pred_col_d = [["cold", "warm"], [], ["other", "cold"]] pred_in_df = pd.DataFrame.from_dict( {"A": pred_col_a, "B": pred_col_b, "C": pred_col_c, "D": pred_col_d} ) pred_out_df = encoder.transform_batch(pred_in_df) print(pred_out_df.to_string()) pred_processed_col_a = ["blue", "yellow", None] pred_processed_col_b = [[1, 0, 0], [0, 0, 1], [0, 0, 0]] pred_processed_col_c = [[0, 0, 1], [1, 0, 0], [0, 0, 0]] pred_processed_col_d = [[1, 0, 1], [0, 0, 0], [1, 0, 0]] pred_expected_df = pd.DataFrame.from_dict( { "A": pred_processed_col_a, "B": pred_processed_col_b, "C": pred_processed_col_c, "D": pred_processed_col_d, } ) assert pred_out_df.equals(pred_expected_df) # Test null behavior. null_col = [1, None] nonnull_col = [1, 1] null_df = pd.DataFrame.from_dict({"A": null_col}) null_ds = ray.data.from_pandas(null_df) nonnull_df = pd.DataFrame.from_dict({"A": nonnull_col}) nonnull_ds = ray.data.from_pandas(nonnull_df) null_encoder = OneHotEncoder(["A"]) # Verify fit fails for null values. with pytest.raises(ValueError): null_encoder.fit(null_ds) null_encoder.fit(nonnull_ds) # Verify transform fails for null values. with pytest.raises(ValueError): null_encoder.transform(null_ds) null_encoder.transform(nonnull_ds) # Verify transform_batch fails for null values. with pytest.raises(ValueError): null_encoder.transform_batch(null_df) null_encoder.transform_batch(nonnull_df)
2cab697e29aee1654a00ffe642d35b9171c09236
683
https://github.com/ray-project/ray.git
655
def test_multi_hot_encoder(): col_a = ["red", "green", "blue", "red"] col_b = ["warm", "cold", "hot", "cold"] col_c = [1, 10, 5, 10] col_d = [["warm"], [], ["hot", "warm", "cold"], ["cold", "cold"]] in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c, "D": col_d}) ds = ray.data.from_pandas(in_df) encoder = MultiHotEncoder(["B", "C", "D"]) # Transform with unfitted preprocessor. with pytest.raises(PreprocessorNotFittedException): encoder.transform(ds) # Fit data. encoder.fit(ds) assert encoder.stats_ == { "unique_values(B)": {"cold": 0, "hot": 1, "warm": 2}, "unique_values(C)": {1: 0, 5: 1, 10: 2}, "unique_values(D)": {"cold": 0, "hot": 1, "warm": 2}, } # Transform data. transformed = encoder.transform(ds) out_df = transformed.to_pandas() processed_col_a = col_a processed_col_b = [[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]] processed_col_c = [[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]] processed_col_d = [[0, 0, 1]
53
1,087
test_multi_hot_encoder
22
0
2
7
erpnext/regional/report/eway_bill/eway_bill.py
67,208
style: format code with black
erpnext
12
Python
17
eway_bill.py
def execute(filters=None): if not filters: filters.setdefault("posting_date", [nowdate(), nowdate()]) columns, data = [], [] columns = get_columns() data = get_data(filters) return columns, data
494bd9ef78313436f0424b918f200dab8fc7c20b
51
https://github.com/frappe/erpnext.git
15
def execute(filters=None): if not filters: filters.setdefault("postin
8
86
execute
22
1
1
12
tests/openbb_terminal/stocks/options/test_syncretism_model.py
285,444
Here we merge all API Refactor related branches (#2236) * Update api.py * Updated forex menu * refactor ycrv command * refactor ycrv command black * refactor ecocal command * Minh changes * Adding space to test pushing * title fix ecocal df * get economic calendar annotation * fix investingcom tests * refactor index command * refactor overview command * give defaults to wsj view function args * rename date args investincom * refacto bigmac command * fix ecocal typo * refactor rtps command * alphavantage gdp * alphavantage gdp per capita * alphavantage cpi * alphavantage tyld * alphavantage inf * refactor macro command * refactor macro command w helpers * refactor treasury command * fix macro on terminal * treasury labels * refactor maturities * update treasury maturities doc strings * refactor get economic calendar finhub * refactor map command api * display map filter choices * route economy api to performance map * route economy api to performance map * display group choices on valuation command * refactor performance and valuation commands * refactor spectrum model and view * add choices to spectrum controller * delete image after view * fix model tests finviz * fix finciz view tests * refactor futures * fix some tests * fix more tests * fix controller test * refactor fred series notes * update fred notes docstring * refacto fred series ids * fix pred and qa when empty datasets * refactor fred * uncomment stuff * refacto get series data * fix some tests * set defaults on args * refactor fred yield curve * black * fix spell and remove ecocal names * fix linting * linting * pylint fix * change dangerous defaults * Working through crypto fixes (#2256) * Working through crypto fixes * Continued adding crypto stuff * Added crypto overview * Added test fixes * Added fixtures * Fixed tests * Fixed charting issue * Removed broken APIs * Final adjustments * Added test fixes * map get groups and get ycrv countries into old api * exposed econdb helper funcs * remove helpers * refactor search indices * linting * refactor arg currency * pylint from currency * Started switching crpyto ascending to ascend * Merging * Portfolio model arguements, params, and docstring * Refactored for etf commands (#2292) * Refactored for etf commands * Fixed tests * Added load command * Fixed menu * Portfolio logic fixes * Added econometrics (#2260) * Added econometrics * Fixed tests * Simplified API * Added test fixes * Added test csv * Allowed examples to be loaded * Fund refactor (#2291) * Fund refactor * Changed fund_name and fund to name * Changed ascending to ascend * Stock menu refactoring for easier API usage (#2194) * Stocks refactoring for easier API usage * Linting * Refactor newly added features * Linting * Fixing tests * Refactor common files used by stocks menu * Fixing flake8 * Fix linting and tests * Linting * Fix flake8 * refactor insider_data * refactor mentions * refactor watchlist * refactor sentiment * refactor sentiment * fix yahoofinance tests * refactor load and candle * refactor get_news and display_news * refactor stocks.ins.act * candle default matplotlib * fix yahoofinance_view tests * fix ark model tests * fix ark view tests * fix business insider model * fix business insider view * refactor csimarket model * fix tests csi market model * update dd controller * fix get suppliers tests * fix dd controller tests * fix finhub tests * fix finviz tests * fix fmp tests * fix marketwatch tests * corrected argument keywords in test_bt_model * corrected argument keywords in test_bt_view * refactor fa 
controller * refactor marketwatch view * refactor gov controller * fix tests fa av * fix tests elect * fix dcf tests * fix polygon tests * fix fmp tests * fix quiverquant tests * fix yahoofinance fa tests * fix more fa tests * fix insider tests * fix more tests * fix more tests * fix options tests * fix stock gov tests * fix tests test_ba_controller * fix tests for test_finviz_compare_model.py * fixed 2 tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fix final tests * fixed tests * fixed tests * Fix tests * black * forgot to black tests * fixed tests * fixed tests * fixed tests * fixed tests * flakefix * Tests + code : Stocks / Discovery * fix tests * added recorder * fixed tests * fixed tests * black * black * remove unused imports * refactor display raw * sia dicts fix * pylint * linting * remove dangerous default * fix tests * fix beta model test * black * skip screener qa test * change sector path to sectors * update tests readme * fix metric defaults * black * substitute lost ticker * defaults cpic * another round on sia * refactor cramer * reduce default tweets on sentiment * refactor yf hist, corr, volume * arkorders default * refactor income, balance, cashflow * refacto scorr, screener, getfinnhub * refactor stockgrid * ibkr refactor * another round on stockgrid * add dividens end point * refactor discovery endpoints * update docstrings with similar input * refactor messages * refactor ba * refactor regioons * refactor twitter sentiment * refactor hist * refactor regions * give default to timeframe * refactor bunch of defaults and arg names * remove leftover imports * refactor vwap * let tests run * fix tests * fix stock tests * fix stockanalysis tests * flake * MYPY * Made important changes * added fixes * Fixed big issue * Added fixes to tests * fix qa tests * fix tests * fix 1 more test * last stocks failing * fix crypto test Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: colin99d <colin99delahunty@gmail.com> * fix portfolio tests * change period to window * update ca docstrings * refactor get_similar_companies func * Fixed * Update CI * Update CI 2 * Update CI 3 * Update dependencies Co-authored-by: colin99d <colin99delahunty@gmail.com> Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: James Simmons <simmonsj330@gmail.com> Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> Co-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt>
OpenBBTerminal
11
Python
20
test_syncretism_model.py
def test_get_historical_greeks_invalid_status(mocker): mock_response = requests.Response() mock_response.status_code = 400 mocker.patch(target="requests.get", new=mocker.Mock(return_value=mock_response)) result_df = syncretism_model.get_historical_greeks( symbol="PM", expiry="2022-01-07", chain_id="PM220107P00090000", strike=90, put=True, ) assert result_df.empty @pytest.mark.vcr
9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b
@pytest.mark.vcr
67
https://github.com/OpenBB-finance/OpenBBTerminal.git
73
def test_get_historical_greeks_invalid_status(mocker): mock_response = requests.Response() mock_response.status_code = 400 mocker.patch(target="requests.get", new=mocker.Mock(return_value=mock_response)) r
23
119
test_get_historical_greeks_invalid_status
8
0
1
4
homeassistant/components/hdmi_cec/media_player.py
307,745
Enforce MediaPlayerState in hdmi_cec media player (#78522)
core
7
Python
8
media_player.py
def media_play(self) -> None: self.send_keypress(KEY_PLAY) self._attr_state = MediaPlayerState.PLAYING
b29605060a74c441550708ccf4ace4b697f66ae6
21
https://github.com/home-assistant/core.git
29
def media_play(self) -> None: self.send_keypress(KEY_PLAY) self._attr_state = Media
7
37
media_play
61
0
5
16
DemoPrograms/Demo_User_Settings.py
212,768
Getting more Demo Programs synced up. New demo for VPush too
PySimpleGUI
14
Python
44
Demo_User_Settings.py
def settings_window(): window = make_window() current_theme = sg.theme() while True: event, values = window.read() if event in (sg.WINDOW_CLOSED, 'Exit'): break if event == 'Save': # Save some of the values as user settings sg.user_settings_set_entry('-input-', values['-IN-']) sg.user_settings_set_entry('-theme-', values['-LISTBOX-'][0]) sg.user_settings_set_entry('-option1-', values['-CB1-']) sg.user_settings_set_entry('-option2-', values['-CB2-']) # if the theme was changed, restart the window if values['-LISTBOX-'][0] != current_theme: current_theme = values['-LISTBOX-'][0] window.close() window = make_window()
1eb653d91015c13ecd48eaa84d73efeaca94d5de
123
https://github.com/PySimpleGUI/PySimpleGUI.git
207
def settings_window(): window = make_window() current_theme = sg.theme() while True: event, values = window.read() if event in (sg.WINDOW_CLOSED, 'Exit'): break if event == 'Save': # Save some of the values as user settings sg.user_settings_set_entry('-input-', values['-IN-']) sg.user_settings_set_en
12
220
settings_window
20
0
1
5
tests/providers/ftp/hooks/test_ftp.py
45,825
Updates FTPHook provider to have test_connection (#21997) * Updates FTP provider to have test_connection Co-authored-by: eladkal <45845474+eladkal@users.noreply.github.com>
airflow
10
Python
18
test_ftp.py
def test_connection_success(self): with fh.FTPHook() as ftp_hook: status, msg = ftp_hook.test_connection() assert status is True assert msg == 'Connection successfully tested'
26e8d6d7664bbaae717438bdb41766550ff57e4f
31
https://github.com/apache/airflow.git
59
def test_connection_success(self): with fh.FTPHook() as ftp_hook: status, msg = ftp_hook.test_connection() assert status is True asse
8
56
test_connection_success
166
0
16
36
sympy/solvers/decompogen.py
196,939
added decompogen for Min and Max
sympy
13
Python
90
decompogen.py
def decompogen(f, symbol): f = sympify(f) if not isinstance(f, Expr) or isinstance(f, Relational): raise TypeError('expecting Expr but got: `%s`' % func_name(f)) if symbol not in f.free_symbols: return [f] result = [] # ===== Simple Functions ===== # if isinstance(f, (Function, Pow)): if f.is_Pow and f.base == S.Exp1: arg = f.exp else: arg = f.args[0] if arg == symbol: return [f] result += [f.subs(arg, symbol)] + decompogen(arg, symbol) return result # ===== Min/Max Functions ===== # if isinstance(f, (Min, Max)): if And(*[a.has(symbol) for a in f.args]): raise TypeError('cannot decompose %s' % f) for i in f.args: if i.has(symbol): arg = i result += [f.subs(i, symbol)] + decompogen(i, symbol) return result # ===== Convert to Polynomial ===== # fp = Poly(f) gens = list(filter(lambda x: symbol in x.free_symbols, fp.gens)) if len(gens) == 1 and gens[0] != symbol: f1 = f.subs(gens[0], symbol) f2 = gens[0] result += [f1] + decompogen(f2, symbol) return result # ===== Polynomial decompose() ====== # try: result += decompose(f) return result except ValueError: return [f]
4577d7bc0d6778506f6c2491636f3c06ecd0ff4d
295
https://github.com/sympy/sympy.git
410
def decompogen(f, symbol): f = sympify(f) if not isinstance(f, Expr) or isinstance(f, Relational): raise TypeError('expecting Expr but got: `%s`' % func_name(f)) if symbol not in f.free_symbols: return [f] result = [] # ===== Simple Functions ===== # if isinstance(f, (Function, Pow)): if f.is_Pow and f.base == S.Exp1: arg = f.exp else: arg = f.args[0] if arg == symbol: return [f] result += [f.subs(arg, symbol)] + decompogen(arg, symbol) return result # ===== Min/Max Functions ===== # if isinstance(f, (Min, Max)): if And(*[a.has(symbol) for a in f.args]): raise TypeError('ca
38
463
decompogen
48
0
2
12
tests/utils/test_dag_cycle.py
47,680
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
airflow
15
Python
34
test_dag_cycle.py
def test_cycle_large_loop(self): # large loop dag = DAG('dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) # A -> B -> C -> D -> E -> A with dag: start = EmptyOperator(task_id='start') current = start for i in range(10000): next_task = EmptyOperator(task_id=f'task_{i}') current.set_downstream(next_task) current = next_task current.set_downstream(start) with pytest.raises(AirflowDagCycleException): assert not check_cycle(dag)
49e336ae0302b386a2f47269a6d13988382d975f
83
https://github.com/apache/airflow.git
182
def test_cycle_large_loop(self): # large loop dag = DAG('dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) # A -> B -> C -> D -> E -> A with dag: start = EmptyOperator(task_id='start') current = start for i in range(10000): next_task = EmptyOperator(task_id=f'task_{i}') current.set_downstream(next_task) current = next_task current.set_downstream(start) with pytest.raises(AirflowD
19
151
test_cycle_large_loop
50
0
7
13
freqtrade/freqai/prediction_models/RL/RLPrediction_env.py
150,554
Working base for reinforcement learning model
freqtrade
12
Python
30
RLPrediction_env.py
def _calculate_reward(self, action): step_reward = 0 trade = False if ((action == Actions.Buy.value and self._position == Positions.Short) or (action == Actions.Sell.value and self._position == Positions.Long)): trade = True if trade: current_price = self.prices[self._current_tick] last_trade_price = self.prices[self._last_trade_tick] price_diff = current_price - last_trade_price if self._position == Positions.Long: step_reward += price_diff return step_reward
05ed1b544f2853ae0054cd22bd15e623abbb3aa9
97
https://github.com/freqtrade/freqtrade.git
169
def _calculate_reward(self, action): step_rew
19
150
_calculate_reward
107
1
2
8
tests/www/views/test_views_tasks.py
44,631
Modernize DAG-related URL routes and rename "tree" to "grid" (#20730) Co-authored-by: Igor Kholopov <kholopovus@gmail.com>
airflow
14
Python
68
test_views_tasks.py
def test_code_from_db_all_example_dags(admin_client): dagbag = DagBag(include_examples=True) for dag in dagbag.dags.values(): DagCode(dag.fileloc, DagCode._get_code_from_file(dag.fileloc)).sync_to_db() url = 'code?dag_id=example_bash_operator' resp = admin_client.get(url, follow_redirects=True) check_content_not_in_response('Failed to load DAG file Code', resp) check_content_in_response('example_bash_operator', resp) @pytest.mark.parametrize( "url, data, content", [ ('paused?dag_id=example_bash_operator&is_paused=false', None, 'OK'), ( "failed", dict( task_id="run_this_last", dag_id="example_bash_operator", dag_run_id=DEFAULT_DAGRUN, upstream="false", downstream="false", future="false", past="false", origin="/graph?dag_id=example_bash_operator", ), "Marked failed on 1 task instances", ), ( "success", dict( task_id="run_this_last", dag_id="example_bash_operator", dag_run_id=DEFAULT_DAGRUN, upstream="false", downstream="false", future="false", past="false", origin="/graph?dag_id=example_bash_operator", ), "Marked success on 1 task instances", ), ( "clear", dict( task_id="runme_1", dag_id="example_bash_operator", execution_date=DEFAULT_DATE, upstream="false", downstream="false", future="false", past="false", only_failed="false", ), "example_bash_operator", ), ( "run", dict( task_id="runme_0", dag_id="example_bash_operator", ignore_all_deps="false", ignore_ti_state="true", execution_date=DEFAULT_DATE, ), "", ), ], ids=[ "paused", "failed-flash-hint", "success-flash-hint", "clear", "run", ], )
f217becdfc371ea18486886cc3b2f47eeda0f77f
@pytest.mark.parametrize( "url, data, content", [ ('paused?dag_id=example_bash_operator&is_paused=false', None, 'OK'), ( "failed", dict( task_id="run_this_last", dag_id="example_bash_operator", dag_run_id=DEFAULT_DAGRUN, upstream="false", downstream="false", future="false", past="false", origin="/graph?dag_id=example_bash_operator", ), "Marked failed on 1 task instances", ), ( "success", dict( task_id="run_this_last", dag_id="example_bash_operator", dag_run_id=DEFAULT_DAGRUN, upstream="false", downstream="false", future="false", past="false", origin="/graph?dag_id=example_bash_operator", ), "Marked success on 1 task instances", ), ( "clear", dict( task_id="runme_1", dag_id="example_bash_operator", execution_date=DEFAULT_DATE, upstream="false", downstream="false", future="false", past="false", only_failed="false", ), "example_bash_operator", ), ( "run", dict( task_id="runme_0", dag_id="example_bash_operator", ignore_all_deps="false", ignore_ti_state="true", execution_date=DEFAULT_DATE, ), "", ), ], ids=[ "paused", "failed-flash-hint", "success-flash-hint", "clear", "run", ], )
70
https://github.com/apache/airflow.git
853
def test_code_from_db_all_example_dags(admin_client): dagbag = DagBag(include_examples=True) for dag in dagbag.dags.values(): DagCode(dag.fileloc, DagCode._get_code_from_file(dag.fileloc)).sync_to_db() url = 'code?dag_id=example_bash_operator' resp = admin_client.get(url, follow_redirects=True) check_content_not_in_response('Failed to load DAG file Code', resp) check_content_in_response('example_bash_operator', resp) @pytest.mark.parametrize( "url, data, content", [ ('paused?dag_id=example_bash_operator&is_paused=false', None, 'OK'), ( "failed", dict( task_id="run_this_last", dag_id="example_bash_operator", dag_run_id=DEFAULT_DAGRUN, upstream="false", downstream="false", future="false", past="false", origin="/graph?dag_id=example_bash_operator", ), "Marked failed on 1 task instances", ), ( "success", dict( task_id="run_this_last", dag_id="example_bash_operator", dag_run_id=DEFAULT_DAGRUN, upstream="false", downstream="false", future="false", past="false", origin="/graph?dag_id=example_bash_operator", ), "Marked success on 1 task instances", ), ( "clear", dict( task_id="runme_1", dag_id="example_bash_operator", execution_date=DEFAULT_DATE, upstream="false",
37
445
test_code_from_db_all_example_dags
104
1
1
37
tests/packaged_modules/test_folder_based_builder.py
105,516
Add AudioFolder packaged loader (#4530) * add audiofolder loader (almost identical to imagefolder except for inferring labels is not default) * add instruction on how to obtain list of audio extensions * add a generic loader * patch autofolder for streaming manually * align autofolder with the latest imagefolder implementation * update tests * add test for duplicate label col * add tests for autofolder (+copied from imagefolder) * add missed audio_file fixture * add documentation * remove boilerplate, make base feature builder's class arg instead of a config's one * remove self.config.label_name, use hardcoded 'label' * patch parents that inherit from DatasetBuilder, revert get_imports * rename autofolder -> folder_builder * make base column name an abstract attr of FolderBuilder instead of config's parameter * Update src/datasets/streaming.py Co-authored-by: Mario Šaško <mario@huggingface.co> * rename FolderBuilder -> FolderBasedBuilder * set drop_labels to None by default for AudioFolder * update documentation * check if builder extending for streaming is not in datasets.builder module Co-authored-by: Mario Šaško <mario@huggingface.co> Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
datasets
12
Python
57
test_folder_based_builder.py
def data_files_with_two_splits_and_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_metadata_two_splits" data_dir.mkdir(parents=True, exist_ok=True) train_dir = data_dir / "train" train_dir.mkdir(parents=True, exist_ok=True) test_dir = data_dir / "test" test_dir.mkdir(parents=True, exist_ok=True) filename = train_dir / "file.txt" # train shutil.copyfile(auto_text_file, filename) filename2 = train_dir / "file2.txt" # train shutil.copyfile(auto_text_file, filename2) filename3 = test_dir / "file3.txt" # test shutil.copyfile(auto_text_file, filename3) train_metadata_filename = train_dir / "metadata.jsonl" train_metadata = textwrap.dedent( ) with open(train_metadata_filename, "w", encoding="utf-8") as f: f.write(train_metadata) test_metadata_filename = test_dir / "metadata.jsonl" test_metadata = textwrap.dedent( ) with open(test_metadata_filename, "w", encoding="utf-8") as f: f.write(test_metadata) data_files_with_two_splits_and_metadata = DataFilesDict.from_local_or_remote( get_data_patterns_locally(data_dir), data_dir ) assert len(data_files_with_two_splits_and_metadata) == 2 assert len(data_files_with_two_splits_and_metadata["train"]) == 3 assert len(data_files_with_two_splits_and_metadata["test"]) == 2 return data_files_with_two_splits_and_metadata @pytest.fixture
6ea46d88c6a09244d785e55e2681bc4033740442
@pytest.fixture
205
https://github.com/huggingface/datasets.git
220
def data_files_with_two_splits_and_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_metadata_two_splits" data_dir.mkdir(parents=True, exist_ok=True) train_dir = data_dir / "train" train_dir.mkdir(parents=True, exist_ok=True) test_dir = data_dir / "test" test_dir.mkdir(parents=True, exist_ok=True) filename = train_dir / "file.txt" # train shutil.copyfile(auto_text_file, filename) filename2 = train_dir / "file2.txt" # train shutil.copyfile(auto_text_file, filename2) filename3 = test_dir / "file3.txt" # test shutil.copyfile(auto_text_file, filename3) train_metadata_filename = train_dir / "metadata.jsonl" train_metadata = textwrap.dedent( ) with open(train_metadata_filename, "w", encoding="utf-8") as f: f.write(train_metadata) test_metadata_filename = test_dir / "metadata.jsonl" test_metadata = textwrap.dedent( ) with open(test_metadata_filename, "w", encoding="utf-8") as f: f.write(test_metadata) data_files_with_two_splits_and_metadata = DataFilesDict.from_local_or_remote( get_data_patterns_locally(data_dir), data_dir ) assert len(data_files_with_two_splits_and_metadata) == 2 assert len(data_files_with_two_splits_and_metadata["train"]) == 3 assert len(data_files_with_two_splits_and_metadata["test"]) == 2 return data_files_with_two_splits_and_me
30
365
data_files_with_two_splits_and_metadata
233
0
1
34
sympy/integrals/tests/test_manual.py
198,335
manualintegrate poly*(a+b*x+c*x**2)**(n+1/2)
sympy
22
Python
100
test_manual.py
def test_manualintegrate_sqrt_quadratic(): assert_is_integral_of(1/sqrt((x - I)**2-1), log(2*x + 2*sqrt(x**2 - 2*I*x - 2) - 2*I)) assert_is_integral_of(1/sqrt(3*x**2+4*x+5), sqrt(3)*asinh(3*sqrt(11)*(x + S(2)/3)/11)/3) assert_is_integral_of(1/sqrt(-3*x**2+4*x+5), sqrt(3)*asin(3*sqrt(19)*(x - S(2)/3)/19)/3) assert_is_integral_of(1/sqrt(3*x**2+4*x-5), sqrt(3)*log(6*x + 2*sqrt(3)*sqrt(3*x**2 + 4*x - 5) + 4)/3) assert manualintegrate(1/sqrt(a+b*x+c*x**2), x) == \ Piecewise((log(b + 2*sqrt(c)*sqrt(a + b*x + c*x**2) + 2*c*x)/sqrt(c), Ne(c, 0)), (2*sqrt(a + b*x)/b, Ne(b, 0)), (x/sqrt(a), True)) assert_is_integral_of((7*x+6)/sqrt(3*x**2+4*x+5), 7*sqrt(3*x**2 + 4*x + 5)/3 + 4*sqrt(3)*asinh(3*sqrt(11)*(x + S(2)/3)/11)/9) assert_is_integral_of((7*x+6)/sqrt(-3*x**2+4*x+5), -7*sqrt(-3*x**2 + 4*x + 5)/3 + 32*sqrt(3)*asin(3*sqrt(19)*(x - S(2)/3)/19)/9) assert_is_integral_of((7*x+6)/sqrt(3*x**2+4*x-5), 7*sqrt(3*x**2 + 4*x - 5)/3 + 4*sqrt(3)*log(6*x + 2*sqrt(3)*sqrt(3*x**2 + 4*x - 5) + 4)/9) assert manualintegrate((d+e*x)/sqrt(a+b*x+c*x**2), x) == \ Piecewise((e*sqrt(a + b*x + c*x**2)/c + (-b*e/(2*c) + d)*log(b + 2*sqrt(c)*sqrt(a + b*x + c*x**2) + 2*c*x)/sqrt(c), Ne(c, 0)), ((2*d*sqrt(a + b*x) + 2*e*(-a*sqrt(a + b*x) + (a + b*x)**(S(3)/2)/3)/b)/b, Ne(b, 0)), ((d*x + e*x**2/2)/sqrt(a), True)) assert manualintegrate((3*x**3-x**2+2*x-4)/sqrt(x**2-3*x+2), x) == \ sqrt(x**2 - 3*x + 2)*(x**2 + 13*x/4 + S(101)/8) + 135*log(2*x + 2*sqrt(x**2 - 3*x + 2) - 3)/16 assert_is_integral_of(sqrt(53225*x**2-66732*x+23013), (x/2 - S(16683)/53225)*sqrt(53225*x**2 - 66732*x + 23013) + 111576969*sqrt(2129)*asinh(53225*x/10563 - S(11122)/3521)/1133160250) assert manualintegrate(sqrt(a+c*x**2), x) == \ Piecewise((a*log(2*sqrt(c)*sqrt(a + c*x**2) + 2*c*x)/(2*sqrt(c)) + x*sqrt(a + c*x**2)/2, Ne(c, 0)), (sqrt(a)*x, True)) assert manualintegrate(sqrt(a+b*x+c*x**2), x) == \ Piecewise(((x/2 + b/(4*c))*sqrt(a + b*x + c*x**2) + (a/2 - b**2/(8*c))*log(b + 2*sqrt(c)*sqrt(a + b*x + c*x**2) + 2*c*x)/sqrt(c), Ne(c, 0)), (2*(a + b*x)**(S(3)/2)/(3*b), Ne(b, 0)), (sqrt(a)*x, True)) assert_is_integral_of(x*sqrt(x**2+2*x+4), (x**2/3 + x/6 + S(5)/6)*sqrt(x**2 + 2*x + 4) - 3*asinh(sqrt(3)*(x + 1)/3)/2)
ae1662c58912be3363b6232999b60b90050cdd0f
1,241
https://github.com/sympy/sympy.git
597
def test_manualintegrate_sqrt_quadratic(): assert_is_integral_of(1/sqrt((x - I)**2-1), log(2*x + 2*sqrt(x**2 - 2*I*x - 2) - 2*I)) assert_is_integral_of(1/sqrt(3*x**2+4*x+5), sqrt(3)*asinh(3*sqrt(11)*(x + S(2)/3)/11)/3) assert_is_integral_of(1/sqrt(-3*x**2+4*x+5), sqrt(3)*asin(3*sqrt(19)*(x - S(2)/3)/19)/3) assert_is_integral_of(1/sqrt(3*x**2+4*x-5), sqrt(3)*log(6*x + 2*sqrt(3)*sqrt(3*x**2 + 4*x - 5) + 4)/3) assert manualintegrate(1/sqrt(a+b*x+c*x**2), x) == \ Piecewise((log(b + 2*sqrt(c)*sqrt(a + b*x + c*x**2) + 2*c*x)/sqrt(c), Ne(c, 0)), (2*sqrt(a + b*x)/b, Ne(b, 0)), (x/sqrt(a), True)) assert_is_integral_of((7*x+6)/sqrt(3*x**2+4*x+5), 7*sqrt(3*x**2 + 4*x + 5)/3 + 4*sqrt(3)*asinh(3*sqrt(11)*(x + S(2)/3)/11)/9) assert_is_integral_of((7*x+6)/sqrt(-3*x**2+4*x+5), -7*sqrt(-3*x**2 + 4*x + 5)/3 + 32*sqrt(3)*asin(3*sqrt(19)*(x - S(2)/3)/19)/9) assert_is_integral_of((7*x+6)/sqrt(3*x**2+4*x-5), 7*sqrt(3*x**2 + 4*x - 5)/3 + 4*sqrt(3)*log(6*x + 2*sqrt(3)*sqrt(3*x**2 + 4*x - 5) + 4)/9) assert manualintegrate((d+e*x)/sqrt(a+b*x+c*x**2), x) == \ Piecewise((e*sqrt(a + b*x + c*x**2)/c + (-b*e/(2*c) + d)*log(b + 2*sqrt(c)*sqrt(a + b*x + c*x**2) + 2*c*x)/sqrt(c), Ne(c, 0)), ((2*d*sqrt(a + b*x) + 2*e*(-a*sqrt(a + b*x) + (a + b*x)**(S(3)/2)/3)/b)/b, Ne(b, 0)), ((d*x + e*x**2/2)/sqrt(a), True)) assert manualintegrate((3*x**3-x**2+2*x-4)/sqrt(x**2-3*x+2), x)
17
1,879
test_manualintegrate_sqrt_quadratic
45
0
5
12
homeassistant/components/ecobee/climate.py
298,678
Use climate enums in ecobee (#70632)
core
11
Python
35
climate.py
def set_temperature(self, **kwargs): low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW) high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH) temp = kwargs.get(ATTR_TEMPERATURE) if self.hvac_mode == HVACMode.HEAT_COOL and ( low_temp is not None or high_temp is not None ): self.set_auto_temp_hold(low_temp, high_temp) elif temp is not None: self.set_temp_hold(temp) else: _LOGGER.error("Missing valid arguments for set_temperature in %s", kwargs)
7b1d5fb10af9cf71fae27f9e1020e18bd1fc2510
84
https://github.com/home-assistant/core.git
145
def set_temperature(self, **kwargs): low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW) high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH) temp = kwargs.get(ATTR_TEMPERATURE) if self.hvac_mode == HVACMode.HEAT_COOL and ( low_temp is not None or high_temp is no
17
136
set_temperature
33
0
3
8
jax/_src/lib/xla_bridge.py
122,323
Migrate JAX internals to builtin Python logging This commit changes the JAX codebase to use Python's builtin logging instead of ABSL logging. With the latter being used in JAX code as of now, the change to Python builtin logging is advised for the following reasons (among others): - absl-py can be removed as an external dependency of JAX. - Builtin logging brings the option of adding more log handlers, for example file handlers for log dumps or writers to different IO streams. Logging in JAX is ported over to take place at the module level. While previously, some Python namespaces within JAX already used module-scoped logging via absl.vlog, the following idiom was adopted to provide the same functionality in Python builtin logging: ```py import logging logger = logging.getLogger(__name__) logger.debug(...) logger.info(...) ``` The builtin root logger is left untouched, which is beneficial for downstream users planning to customize the Python root logger. All JAX internal code promises to log to descendants of the top-level "jax" logger by virtue of log propagation. The package `absl-py` was removed from JAX's install requirements, and added into its test requirements.
jax
10
Python
23
xla_bridge.py
def _make_tpu_driver_client(): if tpu_driver_client is None: logger.info("Remote TPU is not linked into jax; skipping remote TPU.") return None if FLAGS.jax_backend_target is None: logger.info("No --jax_backend_target was provided; skipping remote TPU.") return None return tpu_driver_client.TpuBackend.create(worker=FLAGS.jax_backend_target)
efd61b73f6a3c54a1043881f0670ae2b9dff4c51
45
https://github.com/google/jax.git
47
def _make_tpu_driver_client(): if tpu_driver_client is None: logger.info("Remote TPU is not linked into j
9
77
_make_tpu_driver_client
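The commit message above describes moving from ABSL to module-level stdlib loggers. A small self-contained sketch of that idiom follows; the logger name and function are hypothetical stand-ins, not JAX code.

```python
# Module-level logging idiom described in the commit message above.
# The logger name "myproject.backend" is a hypothetical stand-in for __name__.
import logging

logger = logging.getLogger("myproject.backend")

def make_client(target=None):
    if target is None:
        logger.info("No backend target was provided; skipping remote client.")
        return None
    logger.debug("Creating client for target %s", target)
    return object()  # placeholder for a real client

if __name__ == "__main__":
    # Configure only the root handler; child loggers propagate up to it.
    logging.basicConfig(level=logging.DEBUG,
                        format="%(name)s %(levelname)s %(message)s")
    make_client()
    make_client("grpc://10.0.0.1:8470")
```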
233
0
24
62
python3.10.4/Lib/email/_header_value_parser.py
223,535
add python 3.10.4 for windows
XX-Net
14
Python
82
_header_value_parser.py
def parse_mime_version(value): # The [CFWS] is implicit in the RFC 2045 BNF. # XXX: This routine is a bit verbose, should factor out a get_int method. mime_version = MIMEVersion() if not value: mime_version.defects.append(errors.HeaderMissingRequiredValue( "Missing MIME version number (eg: 1.0)")) return mime_version if value[0] in CFWS_LEADER: token, value = get_cfws(value) mime_version.append(token) if not value: mime_version.defects.append(errors.HeaderMissingRequiredValue( "Expected MIME version number but found only CFWS")) digits = '' while value and value[0] != '.' and value[0] not in CFWS_LEADER: digits += value[0] value = value[1:] if not digits.isdigit(): mime_version.defects.append(errors.InvalidHeaderDefect( "Expected MIME major version number but found {!r}".format(digits))) mime_version.append(ValueTerminal(digits, 'xtext')) else: mime_version.major = int(digits) mime_version.append(ValueTerminal(digits, 'digits')) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) mime_version.append(token) if not value or value[0] != '.': if mime_version.major is not None: mime_version.defects.append(errors.InvalidHeaderDefect( "Incomplete MIME version; found only major number")) if value: mime_version.append(ValueTerminal(value, 'xtext')) return mime_version mime_version.append(ValueTerminal('.', 'version-separator')) value = value[1:] if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) mime_version.append(token) if not value: if mime_version.major is not None: mime_version.defects.append(errors.InvalidHeaderDefect( "Incomplete MIME version; found only major number")) return mime_version digits = '' while value and value[0] not in CFWS_LEADER: digits += value[0] value = value[1:] if not digits.isdigit(): mime_version.defects.append(errors.InvalidHeaderDefect( "Expected MIME minor version number but found {!r}".format(digits))) mime_version.append(ValueTerminal(digits, 'xtext')) else: mime_version.minor = int(digits) mime_version.append(ValueTerminal(digits, 'digits')) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) mime_version.append(token) if value: mime_version.defects.append(errors.InvalidHeaderDefect( "Excess non-CFWS text after MIME version")) mime_version.append(ValueTerminal(value, 'xtext')) return mime_version
8198943edd73a363c266633e1aa5b2a9e9c9f526
440
https://github.com/XX-net/XX-Net.git
645
def parse_mime_version(value): # The [CFWS] is implicit in the RFC 2045 BNF. # XXX: This routine is a bit verbose, should factor out a get_int method. mime_version = MIMEVersion() if not value: mime_version.defects.append(errors.HeaderMissingRequiredValue( "Missing MIME version number (eg: 1.0)")) return mime_version if value[0] in CFWS_LEADER: token, value = get_cfws(value) mime_version.append(token) if not value: mime_version.defects.append(errors.HeaderMissingRequiredValue( "Expected MIME version number but found only CFWS")) digits = '' while value and value[0] != '.' and value[0] not in CFWS_LEADER: digits += value[0] value = value[1:] if not digits.isdigit(): mime_version.defects.append(errors.InvalidHeaderDefect( "Expected MIME major version number but found {!r}".format(digits))) mime_version.append(ValueTerminal(digits, 'xtext')) else: mime_version.major = int(digits) mime_version.append(ValueTerminal(digits, 'digits')) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) mime_version.a
19
742
parse_mime_version
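The stdlib routine above scans a MIME-Version header character by character and records defects rather than raising. A much-simplified sketch of the same major/minor extraction is shown below; it is an illustration only, not the `email._header_value_parser` implementation (it ignores CFWS handling entirely).

```python
# Simplified MIME-Version parsing: pull out major/minor digits and collect
# defects instead of raising. Not the stdlib implementation; CFWS is ignored.
import re

def parse_mime_version_simple(value):
    defects = []
    if not value:
        defects.append("Missing MIME version number (eg: 1.0)")
        return None, None, defects
    match = re.match(r"\s*(\d+)\s*\.\s*(\d+)\s*$", value)
    if not match:
        defects.append(f"Could not parse MIME version from {value!r}")
        return None, None, defects
    return int(match.group(1)), int(match.group(2)), defects

print(parse_mime_version_simple("1.0"))   # (1, 0, [])
print(parse_mime_version_simple(""))      # (None, None, ['Missing MIME version number (eg: 1.0)'])
print(parse_mime_version_simple("1."))    # (None, None, [defect about '1.'])
```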
13
0
2
14
tests/pytests/unit/transport/test_tcp.py
215,604
Fix pre-commit
salt
10
Python
12
test_tcp.py
def xtest_client_reconnect_backoff(client_socket): opts = {"tcp_reconnect_backoff": 5} client = salt.transport.tcp.MessageClient( opts, client_socket.listen_on, client_socket.port )
e70b30ce4f3ac47695fca662ca9e353bf90dabc9
82
https://github.com/saltstack/salt.git
28
def xtest_client_reconnect_backoff(client_socket): opts = {"tcp_reconnect_backoff": 5} client = salt.transport.tcp.MessageClient( opts, client_socket.listen_on, client_socket.po
10
51
xtest_client_reconnect_backoff
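The (disabled) test above passes a `tcp_reconnect_backoff` option to a message client. As a generic illustration of what such an option typically controls, here is a hedged sketch of a reconnect loop with a fixed backoff; it is unrelated to Salt's actual transport code.

```python
# Generic reconnect loop with a fixed backoff delay, loosely mirroring what a
# "tcp_reconnect_backoff" setting might control. Not Salt's implementation.
import socket
import time

def connect_with_backoff(host, port, backoff=5, max_attempts=3):
    for attempt in range(1, max_attempts + 1):
        try:
            return socket.create_connection((host, port), timeout=2)
        except OSError as exc:
            print(f"attempt {attempt} failed ({exc}); sleeping {backoff}s")
            if attempt == max_attempts:
                raise
            time.sleep(backoff)

# connect_with_backoff("127.0.0.1", 4505)  # uncomment to try against a local listener
```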
40
0
1
6
sympy/printing/tests/test_latex.py
197,967
Fixing bug in MatMul.could_extract_minus_sign
sympy
12
Python
25
test_latex.py
def test_issue_15439(): x = MatrixSymbol('x', 2, 2) y = MatrixSymbol('y', 2, 2) assert latex((x * y).subs(y, -y)) == r"x \left(- y\right)" assert latex((x * y).subs(y, -2*y)) == r"x \left(- 2 y\right)" assert latex((x * y).subs(x, -x)) == r"\left(- x\right) y"
1d3c89501c65e4b6cd3b635be60ba1d2bf003b4d
86
https://github.com/sympy/sympy.git
54
def test_issue_15439(): x = MatrixSymbol('x', 2, 2) y = MatrixSymbol('y', 2, 2)
6
136
test_issue_15439
21
0
1
5
python/ray/tests/test_runtime_env_validation.py
147,100
[runtime env] Change `pip_check` default from `True` to `False` (#23306) @SongGuyang @Catch-Bull @edoakes I know we discussed this earlier, but after thinking about it some more I think a more reasonable default is for `pip check` to be `False` by default. My guess is that a lot of users (including myself) work inside an environment where `python -m pip check` fails, but the environment doesn't cause them any problems otherwise. So a lot of users will hit an error when trying a simple `runtime_env` `pip` example, and possibly give up. Another less important piece of evidence is that we had to set `pip_check = False` to make some CI tests pass in the original PR. This also matches the default behavior of pip which allows this situation to occur in the first place: `pip install` doesn't error when there's a dependency conflict; rather the command succeeds, the package is installed and usable, and it prints a warning (which is confusingly titled "ERROR")
ray
10
Python
16
test_runtime_env_validation.py
def test_validate_ray(self): result = parse_and_validate_pip(["pkg1", "ray", "pkg2"]) assert result["packages"] == ["pkg1", "ray", "pkg2"] assert not result["pip_check"] assert "pip_version" not in result
16fd099b8b881c7e195fea7e52832d5784c2325e
41
https://github.com/ray-project/ray.git
48
def test_validate_ray(self): result = parse_and_validate_pip(["pkg1", "ray", "pkg2"]) assert result["packages"] == ["pkg1", "ray", "pkg2"] assert not result["pip_check"]
4
77
test_validate_ray
70
0
2
12
rllib/utils/exploration/tests/test_curiosity.py
137,999
[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369)
ray
12
Python
57
test_curiosity.py
def env_maker(config): name = config.get("name", "MiniGrid-Empty-5x5-v0") framestack = config.get("framestack", 4) env = gym.make(name) # Make it impossible to reach goal by chance. env = gym.wrappers.TimeLimit(env, max_episode_steps=15) # Only use image portion of observation (discard goal and direction). env = minigrid.wrappers.ImgObsWrapper(env) env = OneHotWrapper( env, config.vector_index if hasattr(config, "vector_index") else 0, framestack=framestack, ) return env register_env("mini-grid", env_maker) CONV_FILTERS = [[16, [11, 11], 3], [32, [9, 9], 3], [64, [5, 5], 3]]
8e680c483ce326cefc62e44f68ab1a6948b1c3d2
83
https://github.com/ray-project/ray.git
118
def env_maker(config): name = config.get("name", "MiniGrid-Empty-5x5-v0") framestack = config.get("framestack", 4) env = gym.make(name) # Make it impossible to reach goal by chance. env = gym.wrappers.TimeLimit(env, max_episode_step
18
194
env_maker
11
0
1
5
homeassistant/components/cast/media_player.py
292,335
Deduplicate code in cast media_player (#66815) Co-authored-by: Paulus Schoutsen <balloob@gmail.com>
core
9
Python
11
media_player.py
async def async_added_to_hass(self): self._async_setup(self.entity_id) self._cast_view_remove_handler = async_dispatcher_connect( self.hass, SIGNAL_HASS_CAST_SHOW_VIEW, self._handle_signal_show_view )
c582aecc10f82c2f528bd8ae630445a07bcfb615
30
https://github.com/home-assistant/core.git
50
async def async_added_to_hass(self): self._async_setup(self.entity_id) self._cast_view_remove_handler = async_dispatcher_connect( self.hass, SIGNAL_HASS_CAST_SHOW_VIEW, self._handle_signa
9
50
async_added_to_hass
118
0
3
22
keras/saving/experimental/saving_lib_test.py
279,885
Keras Saving: Make sure the optimizer weights are also built and restored upon loading. Also allow the weights used in the test to have proper gradients, and make the input shape key in config consistent across Sequential and other models. PiperOrigin-RevId: 475455814
keras
11
Python
72
saving_lib_test.py
def test_saving_model_state(self, model_type): temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras") model = getattr(self, f"_get_{model_type}_model")() x = np.random.random((100, 32)) y = np.random.random((100, 1)) model.fit(x, y, epochs=1) # Assert that the archive has not been saved. self.assertFalse(os.path.exists(temp_filepath)) # Mutate the `Dense` layer custom weights to ensure that list and # dict-contained weights get restored. model.layers[1].additional_weights[0].assign([[2]]) model.layers[1].weights_in_dict["my_weight"].assign([[2]]) model.layers[1].nested_layer.kernel.assign([[1]]) model._save_experimental(temp_filepath) # Assert that the archive has been saved. self.assertTrue(os.path.exists(temp_filepath)) loaded_model = saving_lib.load_model(temp_filepath) self.assertEqual(model._is_compiled, loaded_model._is_compiled) # The weights are supposed to be the same (between original and loaded # models). for original_weights, loaded_weights in zip( model.get_weights(), loaded_model.get_weights() ): np.testing.assert_allclose(original_weights, loaded_weights) # The optimizer variables are supposed to be the same (between original # and loaded models). for original_weights, loaded_weights in zip( model.optimizer.variables(), loaded_model.optimizer.variables() ): np.testing.assert_allclose(original_weights, loaded_weights)
ead59b2c4c85284d8c2095e691800255068694ce
249
https://github.com/keras-team/keras.git
336
def test_saving_model_state(self, model_type): temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras") model = getattr(self, f"_get_{model_type}_model")() x = np.random.random((100, 32)) y = np.random.random((100, 1)) model.fit(x, y, epochs=1) # Assert that the archive has not been saved. self.assertFalse(os.path.exists(temp_filepath)) # Mutate the `Dense` layer custom weights to ensure that list and # dict-contained weights get restored. model.layers[1].additional_weights[0].assign([[2]]) model.layers[1].weights_in_dict["my_weight"].assign([[2]]) model.layers[1].nested_layer.kernel.assign([[1]]) model._save_experimental(temp_filepath) # Assert that the archive has been saved. self.assertTrue(os.path.exists(temp_filepath)) loaded_model = saving_lib.load_model(temp_filepath) self.assertEqual(model._is_compiled, loaded_model._is_compiled) # The weights are supposed to be the same (between original and loaded # models). for original_weights, loaded_weights in zip( model.get_weights(), loaded_model.get_weights() ): np.
39
399
test_saving_model_state
11
0
1
4
src/textual/_cache.py
186,334
simpler
textual
9
Python
11
_cache.py
def __repr__(self) -> str: return ( f"<LRUCache maxsize={self._maxsize} hits={self.hits} misses={self.misses}" )
8007c612d45429c7b0cc4314f4aaedccb78334b9
12
https://github.com/Textualize/textual.git
35
def __repr__(self) -> str: return ( f"<LRUCache maxsize={self._ma
6
41
__repr__
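The `__repr__` above reports an LRU cache's size limit and hit/miss counters. For context, here is a minimal LRU cache with the same counters and a similar repr, built on `collections.OrderedDict`; it is a sketch, not Textual's `LRUCache`.

```python
# Minimal LRU cache with hit/miss counters and a repr similar to the one above.
# Built on collections.OrderedDict; this is not Textual's LRUCache.
from collections import OrderedDict

class SimpleLRUCache:
    def __init__(self, maxsize):
        self._maxsize = maxsize
        self._data = OrderedDict()
        self.hits = 0
        self.misses = 0

    def get(self, key, default=None):
        if key in self._data:
            self.hits += 1
            self._data.move_to_end(key)  # mark as most recently used
            return self._data[key]
        self.misses += 1
        return default

    def set(self, key, value):
        self._data[key] = value
        self._data.move_to_end(key)
        if len(self._data) > self._maxsize:
            self._data.popitem(last=False)  # evict least recently used

    def __repr__(self) -> str:
        return f"<SimpleLRUCache maxsize={self._maxsize} hits={self.hits} misses={self.misses}>"

cache = SimpleLRUCache(maxsize=2)
cache.set("a", 1)
cache.get("a")
cache.get("b")
print(cache)  # <SimpleLRUCache maxsize=2 hits=1 misses=1>
```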
114
1
3
17
ivy_tests/test_core/test_image.py
213,917
renamed dev_str arg to dev for all methods.
ivy
11
Python
80
test_image.py
def test_gradient_image(x_n_dy_n_dx, dtype, tensor_fn, dev, call): # smoke test x, dy_true, dx_true = x_n_dy_n_dx x = tensor_fn(x, dtype, dev) dy, dx = ivy.gradient_image(x) # type test assert ivy.is_array(dy) assert ivy.is_array(dx) # cardinality test assert dy.shape == x.shape assert dx.shape == x.shape # value test dy_np, dx_np = call(ivy.gradient_image, x) dy_true = ivy.backends.numpy.array(dy_true, dtype) dx_true = ivy.backends.numpy.array(dx_true, dtype) assert np.allclose(dy_np, dy_true) assert np.allclose(dx_np, dx_true) # compilation test if call in [helpers.torch_call]: # torch device cannot be assigned value of string while scripting return if not ivy.wrapped_mode(): helpers.assert_compilable(ivy.gradient_image) # float_img_to_uint8_img @pytest.mark.parametrize( "fi_tui", [([[0., 1.], [2., 3.]], [[[0, 0, 0, 0], [0, 0, 128, 63]], [[0, 0, 0, 64], [0, 0, 64, 64]]])]) @pytest.mark.parametrize( "tensor_fn", [ivy.array, helpers.var_fn])
d743336b1f3654cd0315f380f43eed4116997c1d
@pytest.mark.parametrize( "fi_tui", [([[0., 1.], [2., 3.]], [[[0, 0, 0, 0], [0, 0, 128, 63]], [[0, 0, 0, 64], [0, 0, 64, 64]]])]) @pytest.mark.parametrize( "tensor_fn", [ivy.array, helpers.var_fn])
154
https://github.com/unifyai/ivy.git
208
def test_gradient_image(x_n_dy_n_dx, dtype, tensor_fn, dev, call): # smoke test x, dy_true, dx_true = x_n_dy_n_dx x = tensor_fn(x, dtype, dev) dy, dx = ivy.gradient_image(x) # type test assert ivy.is_array(dy) assert ivy.is_array(dx) # cardinality test assert dy.shape == x.shape assert dx.shape == x.shape # value test dy_np, dx_np = call(i
30
361
test_gradient_image
7
0
1
4
jina/serve/runtimes/worker/__init__.py
12,511
feat: add grpc health checking (#4779)
jina
8
Python
7
__init__.py
async def async_teardown(self): self._health_servicer.enter_graceful_shutdown() await self.async_cancel() self._data_request_handler.close()
ef662b529b2a2eecea7bb99759a9f7b9d86d3062
26
https://github.com/jina-ai/jina.git
35
async def async_teardown(self): self._health_servicer.enter_graceful_shutdown() await self.async_cancel() self._data_request_handler.close()
7
49
async_teardown
16
0
1
4
packages/syft/src/syft/core/tensor/fixed_precision_tensor.py
916
working tranpose
PySyft
10
Python
13
fixed_precision_tensor.py
def transpose(self, *args, **kwargs) -> FixedPrecisionTensor: res = FixedPrecisionTensor(base=self._base, precision=self._precision) res.child = self.child.transpose(*args, **kwargs) return res
997aef6a577613957f9e3b07454f1187a13bd2af
47
https://github.com/OpenMined/PySyft.git
40
def transpose(self, *args, **kwargs) -> FixedPrecisionTensor: res = FixedPrecisionTensor(base=self._base, precision=self._precision) res.child = self.child.transpose(*args, **kwargs)
11
72
transpose
80
0
1
12
sympy/core/tests/test_arit.py
200,390
Fix various typos Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`
sympy
10
Python
39
test_arit.py
def test_denest_add_mul(): # when working with evaluated expressions make sure they denest eq = x + 1 eq = Add(eq, 2, evaluate=False) eq = Add(eq, 2, evaluate=False) assert Add(*eq.args) == x + 5 eq = x*2 eq = Mul(eq, 2, evaluate=False) eq = Mul(eq, 2, evaluate=False) assert Mul(*eq.args) == 8*x # but don't let them denest unnecessarily eq = Mul(-2, x - 2, evaluate=False) assert 2*eq == Mul(-4, x - 2, evaluate=False) assert -eq == Mul(2, x - 2, evaluate=False)
24f1e7730119fe958cc8e28411f790c9a5ec04eb
135
https://github.com/sympy/sympy.git
118
def test_denest_add_mul(): # when working with evaluated expressions make sure they denest eq = x + 1 eq = Add(eq, 2, evaluate=False) eq = Ad
7
203
test_denest_add_mul
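The test above relies on `evaluate=False` keeping nested `Add`/`Mul` structure intact. A short demonstration of that behaviour with core SymPy:

```python
# Unevaluated Add/Mul nodes keep the argument structure they were given,
# and flatten only once re-evaluated - the behaviour the test above checks.
from sympy import Add, Mul, symbols

x = symbols('x')

eq = Add(x + 1, 2, evaluate=False)
print(eq.args)         # (x + 1, 2) - nesting preserved
print(Add(*eq.args))   # x + 3      - flattened once evaluated

m = Mul(-2, x - 2, evaluate=False)
print(m)               # -2*(x - 2), left unexpanded
```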
37
0
3
13
sklearn/mixture/_bayesian_mixture.py
260,638
MAINT (Bayesian)GaussianMixture use _validate_params (#24021) Co-authored-by: jeremiedbb <jeremiedbb@yahoo.fr>
scikit-learn
14
Python
26
_bayesian_mixture.py
def _check_means_parameters(self, X): _, n_features = X.shape if self.mean_precision_prior is None: self.mean_precision_prior_ = 1.0 else: self.mean_precision_prior_ = self.mean_precision_prior if self.mean_prior is None: self.mean_prior_ = X.mean(axis=0) else: self.mean_prior_ = check_array( self.mean_prior, dtype=[np.float64, np.float32], ensure_2d=False ) _check_shape(self.mean_prior_, (n_features,), "means")
610ada79c9fe7219ef7f2a283e9e0f02e122f948
98
https://github.com/scikit-learn/scikit-learn.git
160
def _check_means_parameters(self, X): _, n_features = X.shape if self.mean_precision_prior is None: self.mean_precision_prior_ = 1.0 else: self.mean_precision_prior_ = self.mean_precision_prior if self.mean_prior is None: self.mean_prior_ = X.mean(axis=0) else: self.mean_prior_ = check_array( self.mean_prior, dtype=[np.float64, np.float32], ensure_2d=False ) _check_shape(self.mean_prior_, (n_fea
19
151
_check_means_parameters
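The snippet above validates the means priors: when omitted, `mean_prior` defaults to `X.mean(axis=0)` and `mean_precision_prior` to 1.0. A hedged usage sketch passing those parameters explicitly (synthetic data, illustrative settings only):

```python
# Usage sketch for the prior parameters validated above. Passing them
# explicitly here just mirrors the documented fallbacks (mean of X, 1.0).
import numpy as np
from sklearn.mixture import BayesianGaussianMixture

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(5, 1, (50, 2))])

bgm = BayesianGaussianMixture(
    n_components=2,
    mean_prior=X.mean(axis=0),   # explicit prior mean (same as the default)
    mean_precision_prior=1.0,    # explicit prior precision (same as the default)
    random_state=0,
).fit(X)
print(bgm.means_.round(2))
```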
83
0
6
20
bots/helpers.py
282,900
Discord bot massive improvement (#1481) * allow logs feature flag * Adding log collection md * upload last log at startup * additions/refractor * refactor * lint/black ++ * disc * TimeRotating Logger and upload to s3 * corrected regex error * makeup for config * logging/disc/sia/etf/++ * append .log before uploading * process to upload logs to s3 * candle ta/etfmcds * fix * ta candles * implement presignedURL * fixed regex * ma's in 1 cmd, delete older files * refactor ta candle * updates * black * moon? * Logger uploader * rotate every hour * only archive if successful * chavis suggestions * windows * ta * commands_dict update * discord tacmds * log_collection error fix * fix * fix * pylint * bb fix * only log filesize * fixes * discord logs * Delete log_collection.md * fixes for other bots on images * bots image upload fix * updated helpers/load candle * more ta cc/housekeeping/refactors/slashcmds * update bots cmds_dict * adjustments to font size/fixes * test fixs/disc earnings * missed a spot * fixes had > revesred * reversed the >< again oops * remove logger branch code blocking tests * black fix * fix missing sources in docstr/daily candle dt tz * load_candle refactor with docstring * moved insiders to disc * Lucas logging changes * Fixing log_collection.md * testing scenario * more ta converted * more ta * Update config_terminal : remove print of verbosity * table cfg/fix matplt/ screener + * fix * what's sleep? 1 more line.. or 2. scr df2img * juan more. fix news 1m chart issue * ticker.upper() fixes * Update log collection * Updating log collection - change tmp folder Co-authored-by: LBolte29 <lbolte@gmx.net> Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: LBolte29 <97528701+LBolte29@users.noreply.github.com> Co-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com> Co-authored-by: didierlopes.eth <dro.lopes@campus.fct.unl.pt>
OpenBBTerminal
15
Python
54
helpers.py
def groupme(self, func, group_id, name, *args, **kwargs): data = func(*args, **kwargs) if "imagefile" in data: imagefile = cfg.IMG_DIR / data["imagefile"] send_image(imagefile, group_id, data.get("description", ""), True) elif "embeds_img" in data: imagefiles = data["images_list"] for img in imagefiles: imagefile = cfg.IMG_DIR / img send_image(imagefile, group_id, data.get("description", ""), True) elif "description" in data: title = data.get("title", "") # TODO: Allow navigation through pages description = data.get("description") if isinstance(description, List): clean_desc = description[0].replace("Page ", "") else: clean_desc = description.replace("Page ", "") message = f"{title}\n{clean_desc}" send_message(message, group_id) os.remove(imagefile)
50cafd500ece43df98e3cf076d81084b2806ea03
171
https://github.com/OpenBB-finance/OpenBBTerminal.git
302
def groupme(self, func, group_id, name, *args, **kwargs): data = func(*args, **kwargs) if "imagefile" in data: imagefile = cfg.IMG_DIR / data["imagefile"]
25
297
groupme
57
0
3
19
scapy/contrib/automotive/scanner/executor.py
209,484
Add assert to GMLAN Scanner to enforce fast fail on to many open TestSockets Fix bugs in TestSocket Fix bugs in the AutomotiveScanner execution_time handling Simplify test code for UDS_Scanner and reuse ObjectPipes to avoid mass creation
scapy
15
Python
48
executor.py
def execute_test_case(self, test_case, kill_time=None): # type: (AutomotiveTestCaseABC, Optional[float]) -> None test_case.pre_execute( self.socket, self.target_state, self.configuration) try: test_case_kwargs = self.configuration[test_case.__class__.__name__] except KeyError: test_case_kwargs = dict() if kill_time: max_execution_time = max(int(kill_time - time.time()), 5) cur_execution_time = test_case_kwargs.get("execution_time", 1200) test_case_kwargs["execution_time"] = min(max_execution_time, cur_execution_time) log_interactive.debug("[i] Execute test_case %s with args %s", test_case.__class__.__name__, test_case_kwargs) test_case.execute(self.socket, self.target_state, **test_case_kwargs) test_case.post_execute( self.socket, self.target_state, self.configuration) self.check_new_states(test_case) self.check_new_testcases(test_case)
e6eaa484b8fa3d10051e82f5a784fe8dedbd5592
148
https://github.com/secdev/scapy.git
292
def execute_test_case(self, test_case, kill_time=None): # type: (AutomotiveTestCaseABC, Optional[float]) -> None test_case.pre_execute( self.socket, self.target_state, self.configuration) try: test_case_kwargs = self.configuration[test_case.__class__.__name__] except KeyError: test_case_kwargs = dict() if kill_time: max_execution_time = max(int(kill_time - time.time()), 5) cur_execution_time = test_case_kwargs.get("execution_time", 1200) test_case_kwargs["execution_time"] = min(max_execution_time, cur_execution_time) log_interactive.debug("[i] Execute test_case %s with args %s", test_case.__class__.__name__, test_case_kwargs) test_case.execute(self.socket, self.target_state, **test_case_kwargs) test_case.po
26
234
execute_test_case
49
0
1
22
tests/components/history_stats/test_sensor.py
313,481
Fix dropouts in history_stats graphs on restart (#73110)
core
13
Python
40
test_sensor.py
async def test_invalid_entity_in_template(hass, recorder_mock): await async_setup_component( hass, "sensor", { "sensor": { "platform": "history_stats", "entity_id": "binary_sensor.test_id", "name": "test", "state": "on", "end": "{{ states('binary_sensor.invalid').attributes.time }}", "duration": "01:00", }, }, ) await hass.async_block_till_done() assert hass.states.get("sensor.test") is None next_update_time = dt_util.utcnow() + timedelta(minutes=1) with freeze_time(next_update_time): async_fire_time_changed(hass, next_update_time) await hass.async_block_till_done() assert hass.states.get("sensor.test") is None
0505c596a563c92def54ea8108be09a338a0dd53
108
https://github.com/home-assistant/core.git
227
async def test_invalid_entity_in_template(hass, recorder_mock): await async_setup_component( hass, "sensor", { "sensor": { "platform": "history_stats", "entity_id": "binary_sensor.test_id", "name": "test", "state": "on", "end": "{{ states('binary_sensor.invalid').attributes.time }}", "duration": "01:00", }, }, ) await hass.async_block_till_done() assert hass.states.get("sensor.test") is None next_update_time = dt_util.utcnow() + timedelta(minutes=1) with freeze_time(next_update_time): async_fire_time_changed(hass, next_update_time) await hass.async_blo
14
201
test_invalid_entity_in_template
179
0
1
34
doc/source/ray-core/_examples/dask_xgboost/dask_xgboost.py
130,058
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
11
Python
136
dask_xgboost.py
def tune_xgboost(train_df, test_df, target_column): # Set XGBoost config. config = { "tree_method": "approx", "objective": "binary:logistic", "eval_metric": ["logloss", "error"], "eta": tune.loguniform(1e-4, 1e-1), "subsample": tune.uniform(0.5, 1.0), "max_depth": tune.randint(1, 9), } ray_params = RayParams( max_actor_restarts=1, cpus_per_actor=cpus_per_actor, num_actors=num_actors ) tune_start_time = time.time() analysis = tune.run( tune.with_parameters( train_xgboost, train_df=train_df, test_df=test_df, target_column=target_column, ray_params=ray_params, ), # Use the `get_tune_resources` helper function to set the resources. resources_per_trial=ray_params.get_tune_resources(), config=config, num_samples=10, metric="eval-error", mode="min", ) tune_end_time = time.time() tune_duration = tune_end_time - tune_start_time print(f"Total time taken: {tune_duration} seconds.") accuracy = 1.0 - analysis.best_result["eval-error"] print(f"Best model parameters: {analysis.best_config}") print(f"Best model total accuracy: {accuracy:.4f}") return analysis.best_config ############################################################################### # Hyperparameter optimization may take some time to complete. tune_xgboost(train_df, eval_df, LABEL_COLUMN) ############################################################################### # Prediction # ---------- # With the model trained, we can now predict on unseen data. For the # purposes of this example, we will use the same dataset for prediction as # for training. # # Since prediction is naively parallelizable, distributing it over multiple # actors can measurably reduce the amount of time needed. inference_df = RayDMatrix(data, ignore=[LABEL_COLUMN, "partition"]) results = predict( bst, inference_df, ray_params=RayParams( cpus_per_actor=cpus_per_actor_inference, num_actors=num_actors_inference ), ) print(results)
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
191
https://github.com/ray-project/ray.git
386
def tune_xgboost(train_df, test_df, target_column): # Set XGBoost config. config = { "tree_method": "approx", "objective": "binary:logistic
42
390
tune_xgboost
91
0
9
24
dask/array/percentile.py
155,524
Replace `interpolation` with `method` and `method` with `internal_method` (#8525) Following the change in numpy 1.22.0 Co-authored-by: James Bourbeau <jrbourbeau@users.noreply.github.com>
dask
14
Python
56
percentile.py
def _percentile(a, q, method="linear"): n = len(a) if not len(a): return None, n if isinstance(q, Iterator): q = list(q) if a.dtype.name == "category": result = np_percentile(a.cat.codes, q, method=method) import pandas as pd return pd.Categorical.from_codes(result, a.dtype.categories, a.dtype.ordered), n if type(a.dtype).__name__ == "DatetimeTZDtype": import pandas as pd if isinstance(a, (pd.Series, pd.Index)): a = a.values if np.issubdtype(a.dtype, np.datetime64): values = a a2 = values.view("i8") result = np_percentile(a2, q, method=method).astype(values.dtype) if q[0] == 0: # https://github.com/dask/dask/issues/6864 result[0] = min(result[0], values.min()) return result, n if not np.issubdtype(a.dtype, np.number): method = "nearest" return np_percentile(a, q, method=method), n
3c46e89aea2af010e69049cd638094fea2ddd576
236
https://github.com/dask/dask.git
238
def _percentile(a, q, method="linear"): n = len(a) if not len(a): return None, n if isinstance(q, Iterator): q = list(q) if a.dtype.name == "category":
34
369
_percentile
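The dask change above tracks NumPy 1.22, where the `interpolation=` keyword of `percentile` was renamed to `method=`. A minimal example of the renamed keyword (assumes NumPy >= 1.22):

```python
# NumPy 1.22+ renamed the `interpolation` keyword of percentile to `method`.
import numpy as np

a = np.array([1, 2, 3, 4, 5], dtype=float)
q = [25, 50, 75]

print(np.percentile(a, q, method="linear"))   # [2. 3. 4.]
print(np.percentile(a, q, method="nearest"))  # [2. 3. 4.]
```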
13
0
1
3
lib/matplotlib/streamplot.py
107,979
Implement proposed enhancement from https://github.com/matplotlib/matplotlib/issues/8388.
matplotlib
8
Python
13
streamplot.py
def start_trajectory(self, xg, yg, broken_streamlines=True): xm, ym = self.grid2mask(xg, yg) self.mask._start_trajectory(xm, ym, broken_streamlines)
5495fd220f4e2df0eb801ed9dfcfd6b557377ca2
37
https://github.com/matplotlib/matplotlib.git
26
def start_trajectory(self, xg, yg, broken_streamlines=True): xm, ym = self.grid2mask(xg, yg) sel
10
53
start_trajectory
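The enhancement above threads a `broken_streamlines` flag into trajectory generation. A hedged usage example of the corresponding `streamplot` keyword follows; it assumes a Matplotlib release that includes this enhancement (3.6 or later).

```python
# broken_streamlines=False forces each streamline to be traced as one
# unbroken path. Assumes Matplotlib >= 3.6, where this keyword exists.
import numpy as np
import matplotlib.pyplot as plt

Y, X = np.mgrid[-3:3:100j, -3:3:100j]
U, V = -1 - X**2 + Y, 1 + X - Y**2

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.streamplot(X, Y, U, V, broken_streamlines=True)   # default behaviour
ax1.set_title("broken_streamlines=True")
ax2.streamplot(X, Y, U, V, broken_streamlines=False)  # continuous lines
ax2.set_title("broken_streamlines=False")
plt.show()
```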
112
0
1
71
tests/push/test_push_rule_evaluator.py
249,691
Implementation for MSC3664: Pushrules for relations (#11804)
synapse
15
Python
45
test_push_rule_evaluator.py
def test_related_event_match_with_fallback(self): evaluator = self._get_evaluator( { "m.relates_to": { "event_id": "$parent_event_id", "key": "😀", "rel_type": "m.thread", "is_falling_back": True, "m.in_reply_to": { "event_id": "$parent_event_id", }, } }, { "m.in_reply_to": { "event_id": "$parent_event_id", "type": "m.room.message", "sender": "@other_user:test", "room_id": "!room:test", "content.msgtype": "m.text", "content.body": "Original message", "im.vector.is_falling_back": "", }, "m.thread": { "event_id": "$parent_event_id", "type": "m.room.message", "sender": "@other_user:test", "room_id": "!room:test", "content.msgtype": "m.text", "content.body": "Original message", }, }, ) self.assertTrue( evaluator.matches( { "kind": "im.nheko.msc3664.related_event_match", "key": "sender", "rel_type": "m.in_reply_to", "pattern": "@other_user:test", "include_fallbacks": True, }, "@user:test", "display_name", ) ) self.assertFalse( evaluator.matches( { "kind": "im.nheko.msc3664.related_event_match", "key": "sender", "rel_type": "m.in_reply_to", "pattern": "@other_user:test", "include_fallbacks": False, }, "@user:test", "display_name", ) ) self.assertFalse( evaluator.matches( { "kind": "im.nheko.msc3664.related_event_match", "key": "sender", "rel_type": "m.in_reply_to", "pattern": "@other_user:test", }, "@user:test", "display_name", ) )
2d0ba3f89aaf9545d81c4027500e543ec70b68a6
216
https://github.com/matrix-org/synapse.git
1,197
def test_related_event_match_with_fallback(self): evaluator = self._get_evaluator( { "m.relates_to": { "event_id": "$parent_event_id", "key": "😀", "rel_type": "m.thread", "is_falling_back": True, "m.in_reply_to": { "event_id": "$parent_event_id", }, } }, { "m.in_reply_to": { "event_id": "$parent_event_id", "type": "m.room.message", "sender": "@other_user:test", "room_id": "!room:test", "content.msgtype": "m.text", "content.body": "Original message", "im.vector.is_falling_back": "", }, "m.thread": { "event_id": "$parent_event_id", "type": "m.room.message", "sender": "@other_user:test", "room_id": "!room:test", "content.msgtype": "m.text", "content.body": "Original message", }, }, ) self.assertTrue( evaluator.matches( { "kind": "im.nheko.msc3664.related_event_match", "key": "sender", "rel_type": "m.in_reply_to", "pattern": "@other_user:test", "include_fallbacks": True, }, "@user:test", "display_name", ) ) self.assertFalse( evaluator.matches( { "kind": "im.nheko.msc3664.related_event_match", "key": "sender",
7
433
test_related_event_match_with_fallback
22
0
1
6
tests/css/test_parse.py
184,745
Renaming opacity to text-opacity in code
textual
10
Python
20
test_parse.py
def test_opacity_to_styles(self, css_value, styles_value): css = f"#some-widget {{ text-opacity: {css_value} }}" stylesheet = Stylesheet() stylesheet.add_source(css) assert stylesheet.rules[0].styles.text_opacity == styles_value assert not stylesheet.rules[0].errors
210214260d6272ed8af52608bbbd1de4cff91f12
47
https://github.com/Textualize/textual.git
56
def test_opacity_to_styles(self, css_value, styles_value): css = f"#some-widget {{ text-opacity: {css_value} }}" stylesheet = Stylesheet() stylesheet.add_source(css) assert stylesheet.rules[0].styles.text_opacity == styles_value assert not stylesheet.rules[0].errors
12
79
test_opacity_to_styles
10
0
1
6
tests/cli/test_work_queues.py
58,581
Update work-queue CLI to accept both IDs and names
prefect
12
Python
10
test_work_queues.py
def test_inspect_by_id(work_queue): invoke_and_assert( command=f"work-queue inspect {work_queue.id}", expected_output_contains=[f"id='{work_queue.id}'", f"name={work_queue.name!r}"], expected_code=0, )
8abc1c25727c0236d52b025dc2e2062f3e67e94b
27
https://github.com/PrefectHQ/prefect.git
36
def test_inspect_by_id(work_queue): invoke_and_assert(
8
63
test_inspect_by_id
19
0
2
5
PySimpleGUI.py
212,840
Added propagate parameter to the Element.bind and Window.bind methods. Indicates whether tkinter should propagate the event to the corresponding element/window or stop with the user callback
PySimpleGUI
10
Python
19
PySimpleGUI.py
def bind(self, bind_string, key, propagate=True): if not self._is_window_created('tried Window.bind'): return self.TKroot.bind(bind_string, lambda evt: self._user_bind_callback(bind_string, evt, propagate)) self.user_bind_dict[bind_string] = key
b3680477c755277192715b343e9cd4254de7c45e
54
https://github.com/PySimpleGUI/PySimpleGUI.git
58
def bind(self, bind_string, key, propagate=True):
10
85
bind
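The `propagate` parameter above maps onto a plain tkinter mechanism: a bound callback that returns the string `"break"` stops tkinter from running further handlers for that event. A small plain-tkinter sketch of that mechanism (not PySimpleGUI code; requires a display to run):

```python
# Plain-tkinter sketch of the propagation mechanism behind the new parameter:
# returning "break" from a bound callback stops further event handling.
import tkinter as tk

def make_handler(propagate):
    def handler(event):
        print(f"key={event.keysym!r} propagate={propagate}")
        return None if propagate else "break"
    return handler

root = tk.Tk()
entry = tk.Entry(root)
entry.pack()
# With propagate=False the keystroke never reaches the Entry widget itself.
entry.bind("<Key>", make_handler(propagate=False))
# root.mainloop()  # uncomment to try interactively
```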
27
0
1
12
tests/integration/high_order_matches/test_document.py
11,577
refactor: unify port args (#4382)
jina
17
Python
21
test_document.py
def test_multi_executor(): f = ( Flow(port=exposed_port) .add(uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'r'}}) .add(uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'm'}}) ) with f: results = Client(port=exposed_port, return_responses=True).post( on='index', inputs=Document(), ) validate_results(results)
51403a57d03f0b1ddfd7fc533ccee78e23f5faa1
85
https://github.com/jina-ai/jina.git
95
def test_multi_executor(): f = ( Flow(port=exposed_port) .add(uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'r'}}) .add(uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'm'}}) ) with f: results = Client(port=exposed_port, return_responses=True).post( on='index',
15
155
test_multi_executor
22
0
2
8
python3.10.4/Lib/distutils/tests/test_bdist_rpm.py
223,057
add python 3.10.4 for windows
XX-Net
11
Python
21
test_bdist_rpm.py
def setUp(self): try: sys.executable.encode("UTF-8") except UnicodeEncodeError: raise unittest.SkipTest("sys.executable is not encodable to UTF-8") super(BuildRpmTestCase, self).setUp() self.old_location = os.getcwd() self.old_sys_argv = sys.argv, sys.argv[:]
8198943edd73a363c266633e1aa5b2a9e9c9f526
58
https://github.com/XX-net/XX-Net.git
78
def setUp(self): try: sys.executable.encode("UTF-8") except UnicodeEncodeError: raise
15
99
setUp
63
0
5
14
torchvision/ops/deform_conv.py
192,182
Consolidate repr (#5392) * Consolidating __repr__ strings Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
vision
12
Python
37
deform_conv.py
def __repr__(self) -> str: s = ( f"{self.__class__.__name__}(" f"{self.in_channels}" f", {self.out_channels}" f", kernel_size={self.kernel_size}" f", stride={self.stride}" ) s += f", padding={self.padding}" if self.padding != (0, 0) else "" s += f", dilation={self.dilation}" if self.dilation != (1, 1) else "" s += f", groups={self.groups}" if self.groups != 1 else "" s += ", bias=False" if self.bias is None else "" s += ")" return s
93c85bbcc31f8d5a052daf06f2f91f39697af1a4
81
https://github.com/pytorch/vision.git
173
def __repr__(self) -> str: s = ( f"{self.__class__.__name__}(" f"{self.in_channels}" f", {self.out_channels}" f", kernel_size={self.kernel_size}" f", stride={self.stride}" ) s += f", padding={self.padding}" if self.padding != (0, 0) else "" s += f", dilation={self.dilation}" if self.dilation != (1, 1) else "" s += f", groups={self.groups}" if self.groups != 1 else "" s += ", bias=False" if self.bias is None else ""
14
194
__repr__
25
0
3
7
keras/utils/feature_space.py
280,476
Add FeatureSpace utility. PiperOrigin-RevId: 487344904
keras
11
Python
18
feature_space.py
def _cross_features(self, features): all_outputs = {} for cross in self.crosses: inputs = [features[name] for name in cross.feature_names] outputs = self.crossers[cross.name](inputs) all_outputs[cross.name] = outputs return all_outputs
9fd2946909b1b26d05593c7249f2381c3d93d382
55
https://github.com/keras-team/keras.git
78
def _cross_features(self, features): all_outputs = {} for cross in self.crosses: inputs = [features[name] fo
11
83
_cross_features
74
0
1
7
lib/mpl_toolkits/mplot3d/axes3d.py
107,026
Clean up 3d plot box_aspect zooming linting Cleanup Make zoom and dist private attrs Deprecate Axes3D.dist Deprecate Axes3D.dist
matplotlib
9
Python
50
axes3d.py
def set_top_view(self): # this happens to be the right view for the viewing coordinates # moved up and to the left slightly to fit labels and axes xdwl = 0.95 / self._dist xdw = 0.9 / self._dist ydwl = 0.95 / self._dist ydw = 0.9 / self._dist # This is purposely using the 2D Axes's set_xlim and set_ylim, # because we are trying to place our viewing pane. super().set_xlim(-xdwl, xdw, auto=None) super().set_ylim(-ydwl, ydw, auto=None)
b1737e0ec9b274a979dc6c13d328cf494a657214
71
https://github.com/matplotlib/matplotlib.git
143
def set_top_view(self): # this happens to be the right view for the viewing coordinates # moved up and to the left slightly to fit labels and axes xdwl = 0.95 / self._dist xdw = 0.9 / self._dist ydwl = 0.95 / self._dist ydw = 0.9 / sel
11
103
set_top_view
22
0
1
7
wagtail/admin/tests/test_page_chooser.py
72,060
Reformat with black
wagtail
11
Python
18
test_page_chooser.py
def test_locale_selector_present_in_root_view(self): response = self.client.get(reverse("wagtailadmin_choose_page")) html = response.json().get("html") self.assertIn(self.LOCALE_SELECTOR_HTML, html) switch_to_french_url = self.get_choose_page_url(locale=self.fr_locale) fr_selector = f'<a href="{switch_to_french_url}" aria-label="French" class="u-link is-live">' self.assertIn(fr_selector, html)
d10f15e55806c6944827d801cd9c2d53f5da4186
64
https://github.com/wagtail/wagtail.git
63
def test_locale_selector_present_in_root_view(self): response = self.client.get(reverse("wagtailadmin_choose_page")) html = response.json().get("html") self.assertIn(self.LOCALE_SELECTOR_HTML, html) switch_to_french_url = self.get_choose_page_url(locale=self.fr_locale) fr_selector = f'<a href="{switch_to_french_url}" aria-label="French" class="u-link is-live">' self.assertIn(fr_selector, html)
15
111
test_locale_selector_present_in_root_view
42
0
1
10
tests/unit_tests/docstore/test_inmemory.py
191,528
wip: add method for both docstore and embeddings (#119) this will break atm but wanted to get thoughts on implementation. 1. should add() be on docstore interface? 2. should InMemoryDocstore change to take a list of documents as init? (makes this slightly easier to implement in FAISS -- if we think it is less clean then could expose a method to get the number of documents currently in the dict, and perform the logic of creating the necessary dictionary in the FAISS.add_texts method. Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
langchain
12
Python
33
test_inmemory.py
def test_adding_document_already_exists() -> None: _dict = {"foo": Document(page_content="bar")} docstore = InMemoryDocstore(_dict) new_dict = {"foo": Document(page_content="foo")} # Test that error is raised. with pytest.raises(ValueError): docstore.add(new_dict) # Test that old document is the same. bar_output = docstore.search("foo") assert isinstance(bar_output, Document) assert bar_output.page_content == "bar"
315b0c09c614fa44daa61529d1f1da2fe827b16c
72
https://github.com/hwchase17/langchain.git
79
def test_adding_document_already_exists() -> None: _dict = {"foo": Document(page_content="bar")} docstore = InMemoryDocstore(_dict) new_dict = {"foo": Document(page_content="foo")} # T
14
134
test_adding_document_already_exists
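The test above expects `add()` to refuse IDs that already exist and leave the original document untouched. A minimal dict-backed sketch with the same behaviour; it is an illustration, not LangChain's `InMemoryDocstore`.

```python
# Minimal dict-backed docstore with the duplicate-rejection behaviour the
# test above expects. Illustration only, not LangChain's InMemoryDocstore.
class SimpleDocstore:
    def __init__(self, docs=None):
        self._docs = dict(docs or {})

    def add(self, new_docs):
        overlap = set(new_docs) & set(self._docs)
        if overlap:
            raise ValueError(f"Tried to add ids that already exist: {overlap}")
        self._docs.update(new_docs)

    def search(self, doc_id):
        return self._docs.get(doc_id, f"ID {doc_id} not found.")

store = SimpleDocstore({"foo": "bar"})
try:
    store.add({"foo": "baz"})
except ValueError as exc:
    print(exc)
print(store.search("foo"))  # still "bar" - the old document is unchanged
```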
47
0
4
15
shell-integration/ssh/bootstrap.py
103,191
Avoid unnecessary which and fix typos
kitty
12
Python
40
bootstrap.py
def compile_terminfo(base): tic = shutil.which('tic') if not tic: return tname = '.terminfo' if os.path.exists('/usr/share/misc/terminfo.cdb'): tname += '.cdb' os.environ['TERMINFO'] = os.path.join(HOME, tname) cp = subprocess.run( [tic, '-x', '-o', os.path.join(base, tname), os.path.join(base, '.terminfo', 'kitty.terminfo')], stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) if cp.returncode != 0: sys.stderr.buffer.write(cp.stdout) raise SystemExit('Failed to compile the terminfo database')
397fbe7ad32cb455a17a44e3868f2d3582e7c998
125
https://github.com/kovidgoyal/kitty.git
112
def compile_terminfo(base): tic = shutil.which('tic') if not tic: return tname = '.terminfo' if os.path.exists('/usr/share/misc/terminfo.cdb'): tname += '.cdb' os.environ['TERMINFO'] = os.path.join(HOME, tname) cp = subprocess.run( [tic, '-x', '-o', os.path.join(base, tname), os.path.join(base, '.terminfo', 'kitty.terminfo')], stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) if cp.returncode != 0: sys.stderr.buffer.write(cp.stdout) raise SystemExit('Failed to c
24
208
compile_terminfo
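The bootstrap function above follows a common pattern: locate an external tool, run it with stdout and stderr combined, and surface the captured output only on failure. A generic sketch of that pattern (the tool name and arguments here are illustrative, not kitty's invocation):

```python
# Generic pattern: find an external tool, capture combined output, and show it
# only when the command fails. Tool name/arguments are illustrative.
import shutil
import subprocess
import sys

def run_tool(args):
    exe = shutil.which(args[0])
    if exe is None:
        return  # tool not installed; skip silently, as the bootstrap does
    cp = subprocess.run([exe, *args[1:]],
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    if cp.returncode != 0:
        sys.stderr.buffer.write(cp.stdout)
        raise SystemExit(f"{args[0]} failed with exit code {cp.returncode}")

run_tool(["tic", "-V"])  # silent on success; raises with output on failure
```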
8
0
1
3
homeassistant/components/wemo/sensor.py
294,683
Use device properties for WeMo Insight sensors (#63525)
core
7
Python
8
sensor.py
def unique_id_suffix(self) -> str | None: return self.entity_description.unique_id_suffix
c6ba987995e4c726614ffbde2b31ce01a034aab3
16
https://github.com/home-assistant/core.git
22
def unique_id_suffix(self) -> str | None: return self.entity_description.unique_id_suf
4
28
unique_id_suffix
9
0
1
4
tests/providers/amazon/aws/operators/test_sagemaker_model.py
45,297
Add SageMakerDeleteModelOperator (#21673) * Implement SagemakerDeleteModelOperator
airflow
9
Python
9
test_sagemaker_model.py
def test_execute(self, delete_model, mock_client): delete_model.return_value = None self.sagemaker.execute(None) delete_model.assert_called_once_with(model_name='test')
cb24ee9414afcdc1a2b0fe1ec0b9f0ba5e1bd7b7
30
https://github.com/apache/airflow.git
29
def test_execute(self, delete_model, mock_client): delete_model.return_value = None self.sagemaker.execute(None) delete_model.assert_called_once_with(model_name='test')
9
49
test_execute
116
0
9
16
erpnext/accounts/dashboard_chart_source/account_balance_timeline/account_balance_timeline.py
69,446
perf: use `get_cached_value` instead of `db.get_value` in accounts module
erpnext
16
Python
75
account_balance_timeline.py
def build_result(account, dates, gl_entries): result = [[getdate(date), 0.0] for date in dates] root_type = frappe.get_cached_value("Account", account, "root_type") # start with the first date date_index = 0 # get balances in debit for entry in gl_entries: # entry date is after the current pointer, so move the pointer forward while getdate(entry.posting_date) > result[date_index][0]: date_index += 1 result[date_index][1] += entry.debit - entry.credit # if account type is credit, switch balances if root_type not in ("Asset", "Expense"): for r in result: r[1] = -1 * r[1] # for balance sheet accounts, the totals are cumulative if root_type in ("Asset", "Liability", "Equity"): for i, r in enumerate(result): if i > 0: r[1] = r[1] + result[i - 1][1] return result
27df455b9862396a192ce381f2e34f0d3cb94e5e
155
https://github.com/frappe/erpnext.git
95
def build_result(account, dates, gl_entries): result = [[getdate(date), 0.0] for date in dates] root_type = frappe.get_cached_value("Account", account, "root_type") # start with the first date date_index = 0 # get balances in debit for entry in gl_entries: # entry date is after the current pointer, so move the pointer forward while getdate(entry.posting_date) > result[date_index][0]: date_index += 1 result[date_index][1] += entry.debit - entry.cre
18
242
build_result
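The final loop in the snippet above turns per-date balances into cumulative totals, which is a running sum over the value column. The same step can be expressed with `itertools.accumulate`; the data below is made up for illustration.

```python
# The "cumulative totals" step is a running sum over the balance column.
# itertools.accumulate expresses the same idea; data is illustrative.
from itertools import accumulate
from datetime import date

result = [[date(2024, 1, d), v] for d, v in [(1, 100.0), (2, -40.0), (3, 25.0)]]

running = list(accumulate(r[1] for r in result))
cumulative = [[r[0], total] for r, total in zip(result, running)]
for row in cumulative:
    print(row)  # running totals: 100.0, 60.0, 85.0
```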
107
0
8
31
ci/travis/bazel.py
129,795
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
18
Python
70
bazel.py
def textproto_split(input_lines, json_encoder): outputs = [] re_flags = re.M pat_open = re.compile(b"^(\\s*)([-\\w:]+)(\\s*){$", flags=re_flags) pat_line = re.compile(b"^(\\s*)([-\\w]+): (.*)$", flags=re_flags) pat_close = re.compile(b"}$", flags=re_flags) prev_comma = False prev_tail = b"" for full_line in input_lines: pieces = re.split(b"(\\r|\\n)", full_line, 1) pieces[1:] = [b"".join(pieces[1:])] [line, tail] = pieces next_line = pat_open.sub(b'\\1["\\2",\\3[', line) outputs.append( b"" if not prev_comma else b"]" if next_line.endswith(b"}") else b"," ) next_line = pat_close.sub(b"]", next_line) next_line = pat_line.sub( lambda m: textproto_format(*(m.groups() + (json_encoder,))), next_line ) outputs.append(prev_tail + next_line) if line == b"}": yield b"".join(outputs) del outputs[:] prev_comma = line != b"}" and ( next_line.endswith(b"]") or next_line.endswith(b'"') ) prev_tail = tail if len(outputs) > 0: yield b"".join(outputs) del outputs[:]
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
256
https://github.com/ray-project/ray.git
304
def textproto_split(input_lines, json_encoder): outputs = [] re_flags = re.M pat_open = re.compile(b"^(\\s*)([-\\w:]+)(\\s*){$", flags=re_flags) pat_line = re.compile(b"^(\\s*)([-\\w]+): (.*)$", flags=re_flags) pat_close = re.compile(b"}$", flags=re_flags) prev_comma = False prev_tail = b"" for full_line in input_lines: pieces = re.split(b"(\\r|\\n)", full_line, 1) pieces[1:] = [b"".join(pieces[1:])] [line, tail] = pieces next_line = pat_open.sub(b'\\1["\\2",\\3[', line) outputs.append( b"" if not prev_comma else b"]" if next_line.endswith(b"}") else b"," ) next_line = pat_close.sub(b"]", next_line) next_line = pat_line.sub( lambda m: textproto_format(*(m.groups() + (json_encoder,))), next_line ) outputs.append(prev_tail + next_line) if line == b"}": yield b"".join(outputs)
28
418
textproto_split
18
0
1
11
tests/sentry/snuba/test_tasks.py
91,148
feat(mep): Add `build_snql_query` method to replace `build_snuba_filter` (#35537) This adds in `build_snql_query`, which is intended to replace `build_snuba_filter` everywhere. Initially, I'm just implementing this in the error and transaction entities. I'll follow up with sessions and metrics in a later pr. This function uses `QueryBuilder` to build the snql queries that we'll use to create alert rules. Currently, `QueryBuilder` requires that all queries have a start/end passed since those are required for all on demand queries that users make. Alert rules operate differently - we explicitly can't pass a start/end, since these alerts operate on a time window, and snuba adds in the time component every time these subscriptions run. To support this, I've added the ability to skip start/end checks in `QueryBuilder`. For testing, I just copied `BuildSnubaFilterTest` and converted it to check snql queries instead. Since `build_snuba_filter` will go away soon I'm not concerned about duplication here. A bunch of tests here are still commented out - these are all session/metric related. I'll fix those tests in a follow up pr as well.
sentry
12
Python
18
test_tasks.py
def test_user_query_transactions(self): expected_conditions = [ Condition(Column("user"), Op.EQ, "anengineer@work.io"), Condition(Column("project_id"), Op.IN, (self.project.id,)), ] self.run_test( QueryDatasets.TRANSACTIONS, "p95()", "user:anengineer@work.io", expected_conditions, )
c1aa08e0f83144dd42b8e9dfef660b7a35f1e803
59
https://github.com/getsentry/sentry.git
111
def test_user_query_transactions(self): expected_conditions = [ Condition(Column("user"), Op.EQ, "anengineer@work.io"), Condition(Column("project_id"), Op.IN, (self.project.id,)), ] self.run_test( QueryDatasets.TRANSACTIONS, "p95()", "user:anengineer@work.io",
13
94
test_user_query_transactions
69
0
1
31
tests/models/test_dag.py
47,588
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
airflow
10
Python
45
test_dag.py
def _make_test_subdag(self, session): dag_id = 'test_subdag' self._clean_up(dag_id) task_id = 't1' dag = DAG(dag_id, start_date=DEFAULT_DATE, max_active_runs=1) t_1 = EmptyOperator(task_id=task_id, dag=dag) subdag = DAG(dag_id + '.test', start_date=DEFAULT_DATE, max_active_runs=1) SubDagOperator(task_id='test', subdag=subdag, dag=dag) t_2 = EmptyOperator(task_id='task', dag=subdag) subdag.parent_dag = dag dag.sync_to_db() session = settings.Session() dag.create_dagrun( run_type=DagRunType.MANUAL, state=State.FAILED, start_date=DEFAULT_DATE, execution_date=DEFAULT_DATE, session=session, ) subdag.create_dagrun( run_type=DagRunType.MANUAL, state=State.FAILED, start_date=DEFAULT_DATE, execution_date=DEFAULT_DATE, session=session, ) task_instance_1 = TI(t_1, execution_date=DEFAULT_DATE, state=State.RUNNING) task_instance_2 = TI(t_2, execution_date=DEFAULT_DATE, state=State.RUNNING) session.merge(task_instance_1) session.merge(task_instance_2) return dag, subdag
49e336ae0302b386a2f47269a6d13988382d975f
210
https://github.com/apache/airflow.git
318
def _make_test_subdag(self, session): dag_id = 'test_subdag' self._clean_up(dag_id) task_id = 't1' dag = DAG(dag_id, start_date=DEFA
33
321
_make_test_subdag
21
1
1
9
tests/test_widget.py
185,846
Add get_child_by_id and get_widget_by_id (#1146) * Add get_child_by_id and get_widget_by_id * Remove redundant code * Add unit tests for app-level get_child_by_id and get_widget_by_id * Remove redundant test fixture injection * Update CHANGELOG * Enforce uniqueness of ID amongst widget children * Enforce unique widget IDs amongst widgets mounted together * Update CHANGELOG.md * Ensuring unique IDs in a more logical place * Add docstring to NodeList._get_by_id * Dont use duplicate IDs in tests, dont mount 2000 widgets * Mounting less widgets in a unit test * Reword error message * Use lower-level depth first search in get_widget_by_id to break out early
textual
10
Python
19
test_widget.py
def compose(self) -> ComposeResult: grandchild1 = Widget(id="grandchild1") child1 = Widget(grandchild1, id="child1") child2 = Widget(id="child2") yield Widget( child1, child2, id="parent", ) @pytest.fixture
df37a9b90a52de91643ea4dc01b21f32dbeca718
@pytest.fixture
45
https://github.com/Textualize/textual.git
87
def compose(self) -> ComposeResult: grandchild1 = Widget(id="grandchild1") child1 = Widget(grandchild1, id="child1") child2 = Widget(id="child2") yield Widget(
10
85
compose
21
0
1
17
setup.py
195,659
[Mod] update requirements.txt for Python 3.10
vnpy
8
Python
20
setup.py
def get_install_requires(): install_requires = [ "tzlocal>=2.0.0", "PyQt5>=5.15.6", "pyqtgraph>=0.12.3", "qdarkstyle>=3.0.3", "numpy>=1.22.1", "pandas>=1.4.0", "matplotlib>=3.5.1", "seaborn>=0.11.2", "ta-lib>=0.4.24", "deap>=1.3.1", "pyzmq>=22.3.0", "QScintilla>=2.13.1", "plotly>=5.5.0", ] return install_requires
415a6c8615125c6128155633d4527e332a68347b
36
https://github.com/vnpy/vnpy.git
120
def get_install_requires(): install_requires = [ "tzlocal>=2.0.0", "PyQt5>=5.15.6", "pyqtgraph>=0.12.3", "qdarkstyle>=3.0.3", "numpy>=1.22.1", "pandas>=1.4.0", "matplotlib>=3.5.1", "seaborn>=0.11.2", "ta-lib>=0.4.24", "deap>=1.3.1", "pyzmq>=22.3.0", "QScintilla>=2.13.1", "plotly>=5.5.0", ] return install_requi
2
70
get_install_requires
134
0
8
16
python3.10.4/Lib/ipaddress.py
218,485
add python 3.10.4 for windows
XX-Net
11
Python
88
ipaddress.py
def _parse_octet(cls, octet_str): if not octet_str: raise ValueError("Empty octet not permitted") # Reject non-ASCII digits. if not (octet_str.isascii() and octet_str.isdigit()): msg = "Only decimal digits permitted in %r" raise ValueError(msg % octet_str) # We do the length check second, since the invalid character error # is likely to be more informative for the user if len(octet_str) > 3: msg = "At most 3 characters permitted in %r" raise ValueError(msg % octet_str) # Handle leading zeros as strict as glibc's inet_pton() # See security bug bpo-36384 if octet_str != '0' and octet_str[0] == '0': msg = "Leading zeros are not permitted in %r" raise ValueError(msg % octet_str) # Convert to integer (we know digits are legal) octet_int = int(octet_str, 10) if octet_int > 255: raise ValueError("Octet %d (> 255) not permitted" % octet_int) return octet_int
8198943edd73a363c266633e1aa5b2a9e9c9f526
105
https://github.com/XX-net/XX-Net.git
320
def _parse_octet(cls, octet_str): if not octet_str: raise ValueError("Empty octet not permitted") # Reject non-ASCII digits. if not (octet_str.isascii() and octet_str.isdigit()): msg = "Only decimal digits permitted in %r" raise ValueError(msg % octet_str) # We do the length check second, since the invalid character error # is likely to be more informative for the user if len(octet_str) > 3: msg = "At most 3 characters permitted in %r" raise ValueError(msg % octet_str) # Handle leading zeros as strict as glibc's inet_pton() # See security bug bpo-36384 if octet_str != '0' and octet_str[0] == '0': msg = "Leading zeros are not permitted in %r" raise ValueError(msg % o
10
190
_parse_octet
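The stdlib routine above enforces four rules on an IPv4 octet: decimal digits only, at most three characters, no leading zeros, and a value of at most 255. A compact standalone version of the same checks, as a sketch rather than the stdlib code:

```python
# Standalone version of the octet rules enforced above. A sketch, not the
# stdlib ipaddress implementation.
def parse_octet(octet_str: str) -> int:
    if not octet_str:
        raise ValueError("Empty octet not permitted")
    if not (octet_str.isascii() and octet_str.isdigit()):
        raise ValueError(f"Only decimal digits permitted in {octet_str!r}")
    if len(octet_str) > 3:
        raise ValueError(f"At most 3 characters permitted in {octet_str!r}")
    if octet_str != "0" and octet_str[0] == "0":
        raise ValueError(f"Leading zeros are not permitted in {octet_str!r}")
    value = int(octet_str, 10)
    if value > 255:
        raise ValueError(f"Octet {value} (> 255) not permitted")
    return value

for s in ("0", "255", "010", "256"):
    try:
        print(s, "->", parse_octet(s))
    except ValueError as exc:
        print(s, "->", exc)
```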
9
0
1
2
bokeh/server/django/routing.py
212,517
Normalize built-in types and remove `Unknown` (#12252) * Use lower case names for built-in types Also incidentally apply TypeAlias marker. * Drop `Unknown` in favour of consistent usage of `Any` * Enable lazy annotations in conftest.py
bokeh
10
Python
9
routing.py
def get_http_urlpatterns(self) -> list[URLPattern]: return self._http_urlpatterns + [url(r"", AsgiHandler)]
528d85e642340ef30ec91f30b65c7c43370f648d
24
https://github.com/bokeh/bokeh.git
15
def get_http_urlpatterns(self) -> list[URLPattern]: return self.
7
37
get_http_urlpatterns
74
0
3
18
pandas/tests/indexes/interval/test_setops.py
168,632
Revert Interval/IntervalIndex/interval_range.inclusive deprecation (#48116) * Revert "Cln tests interval wrt inclusive (#47775)" This reverts commit 2d6e0b251955d3a2c0c88f7e6ddb57b335ed09b7. * Revert "CLN: Rename private variables to inclusive (#47655)" This reverts commit 102b3ca2119df822e2b0f346fa936d0fe9f17501. * Revert "TYP: Improve typing interval inclusive (#47646)" This reverts commit 55064763e8ba55f6ff5370a8dd083767a189d7a4. * Revert "DEPR: Deprecate set_closed and add set_incluive (#47636)" This reverts commit bd4ff395cbbf4cbde1fc8f1f746cae064a401638. * Revert "DEPR: Remove deprecation from private class IntervalTree (#47637)" This reverts commit f6658ef9fdef5972214fdc338e2c6b5ee308dbf4. * Revert "Revert inclusive default change of IntervalDtype (#47367)" This reverts commit d9dd1289e07d86928d144e53beb3d5b8ab3c2215. * Revert "ENH: consistency of input args for boundaries - Interval (#46522)" This reverts commit 7e23a37e1c5bda81234801a6584563e2880769eb. * Revert "ENH: consistency of input args for boundaries - pd.interval_range (#46355)" This reverts commit 073b3535d7a5171102e5915c38b57c21d13795ae. * Fix ArrowIntervalType manually * Remove unused import * Fix doctest and leftover usage * Fix remaining tests * Fix wording in doctoring Co-authored-by: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
pandas
11
Python
42
test_setops.py
def test_symmetric_difference(self, closed, sort): index = monotonic_index(0, 11, closed=closed) result = index[1:].symmetric_difference(index[:-1], sort=sort) expected = IntervalIndex([index[0], index[-1]]) if sort is None: tm.assert_index_equal(result, expected) assert tm.equalContents(result, expected) # GH 19101: empty result, same dtype result = index.symmetric_difference(index, sort=sort) expected = empty_index(dtype="int64", closed=closed) if sort is None: tm.assert_index_equal(result, expected) assert tm.equalContents(result, expected) # GH 19101: empty result, different dtypes other = IntervalIndex.from_arrays( index.left.astype("float64"), index.right, closed=closed ) result = index.symmetric_difference(other, sort=sort) expected = empty_index(dtype="float64", closed=closed) tm.assert_index_equal(result, expected)
252ae0555abf488522f947107dcdee684be6ac8a
182
https://github.com/pandas-dev/pandas.git
218
def test_symmetric_difference(self, closed, sort): index = monotonic_index(0, 11, closed=closed) result = index[1:].symmetric_difference(index[:-1], sort=sort) expected = IntervalIndex([index[0], index[-1]]) if sort is None: tm.assert_index
20
279
test_symmetric_difference
11
0
1
3
mkdocs/tests/config/config_options_tests.py
225,368
Add tests for new class-based configs The old-style tests are intentionally kept at config_options_legacy_tests.py
mkdocs
11
Python
11
config_options_tests.py
def test_provided_empty(self) -> None: conf = self.get_config(self.Schema, {'option': []}) self.assertEqual(conf.option, None)
ff8552a57abf2c32f2d0344ef12707b88e008493
34
https://github.com/mkdocs/mkdocs.git
24
def test_provided_empty(self) -> None: conf = self.get_config(self.Schema, {'option': []}) self.assertEqual(conf.option, None)
7
55
test_provided_empty
63
0
10
15
airflow/models/dag.py
46,505
Fix entire DAG stops when one task has end_date (#20920) related #19917 , #20471
airflow
14
Python
41
dag.py
def _time_restriction(self) -> TimeRestriction: start_dates = [t.start_date for t in self.tasks if t.start_date] if self.start_date is not None: start_dates.append(self.start_date) earliest = None if start_dates: earliest = timezone.coerce_datetime(min(start_dates)) latest = self.end_date end_dates = [t.end_date for t in self.tasks if t.end_date] if len(end_dates) == len(self.tasks): # not exists null end_date if self.end_date is not None: end_dates.append(self.end_date) if end_dates: latest = timezone.coerce_datetime(max(end_dates)) return TimeRestriction(earliest, latest, self.catchup)
85871eba420f3324432f55f74fe57005ff47a21c
133
https://github.com/apache/airflow.git
193
def _time_restriction(self) -> TimeRestriction: start_dates = [t.start_date for t in self.tasks if t.start_date] if self.start_date is not None: start_dates.append(self.start_date) earliest = None if start_dates: earliest = timezone.coerce_datetime(min(start_dates)) latest = self.end_date end_dates = [t.end_date for t in self.tasks if t.end_date] if len(end_dates) == len(self.tasks): # not exists null end_date if self.end_d
18
208
_time_restriction
203
0
1
73
tests/test_visualization/test_local_visualizer.py
245,471
Support panoptic_seg visualization (#8399) * Support panoptic_seg visualization * add comment * support obtain image * update
mmdetection
12
Python
89
test_local_visualizer.py
def test_add_datasample(self):
    h = 12
    w = 10
    num_class = 3
    num_bboxes = 5
    out_file = 'out_file.jpg'

    image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')

    # test gt_instances
    gt_instances = InstanceData()
    gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
    gt_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
    gt_det_data_sample = DetDataSample()
    gt_det_data_sample.gt_instances = gt_instances

    #
    det_local_visualizer = DetLocalVisualizer()
    det_local_visualizer.add_datasample('image', image, gt_det_data_sample)

    # test out_file
    det_local_visualizer.add_datasample(
        'image', image, gt_det_data_sample, out_file=out_file)
    assert os.path.exists(out_file)
    drawn_img = cv2.imread(out_file)
    assert drawn_img.shape == (h, w, 3)
    os.remove(out_file)

    # test gt_instances and pred_instances
    pred_instances = InstanceData()
    pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
    pred_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
    pred_instances.scores = torch.rand((num_bboxes, ))
    pred_det_data_sample = DetDataSample()
    pred_det_data_sample.pred_instances = pred_instances

    det_local_visualizer.add_datasample(
        'image',
        image,
        gt_det_data_sample,
        pred_det_data_sample,
        out_file=out_file)
    self._assert_image_and_shape(out_file, (h, w * 2, 3))

    det_local_visualizer.add_datasample(
        'image',
        image,
        gt_det_data_sample,
        pred_det_data_sample,
        draw_gt=False,
        out_file=out_file)
    self._assert_image_and_shape(out_file, (h, w, 3))

    det_local_visualizer.add_datasample(
        'image',
        image,
        gt_det_data_sample,
        pred_det_data_sample,
        draw_pred=False,
        out_file=out_file)
    self._assert_image_and_shape(out_file, (h, w, 3))

    # test gt_panoptic_seg and pred_panoptic_seg
    det_local_visualizer.dataset_meta = dict(CLASSES=('1', '2'))
    gt_sem_seg = _create_panoptic_data(num_bboxes, h, w)
    panoptic_seg = PixelData(sem_seg=gt_sem_seg)
    gt_det_data_sample = DetDataSample()
    gt_det_data_sample.gt_panoptic_seg = panoptic_seg

    pred_sem_seg = _create_panoptic_data(num_bboxes, h, w)
    panoptic_seg = PixelData(sem_seg=pred_sem_seg)
    pred_det_data_sample = DetDataSample()
    pred_det_data_sample.pred_panoptic_seg = panoptic_seg
    det_local_visualizer.add_datasample(
        'image',
        image,
        gt_det_data_sample,
        pred_det_data_sample,
        out_file=out_file)
    self._assert_image_and_shape(out_file, (h, w * 2, 3))

    # class information must be provided
    det_local_visualizer.dataset_meta = {}
    with self.assertRaises(AssertionError):
        det_local_visualizer.add_datasample(
            'image',
            image,
            gt_det_data_sample,
            pred_det_data_sample,
            out_file=out_file)
5620fef4ad50ec4f82d6e553e7a4851495893c4b
444
https://github.com/open-mmlab/mmdetection.git
884
def test_add_datasample(self): h = 12 w = 10 num_class = 3 num_bboxes = 5 out_file = 'out_file.jpg' image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8') # test gt_instances gt_instances = InstanceData() gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w) gt_instances.labels = torch.randint(0, num_class, (num_bboxes, )) gt_det_data_sample = DetDataSample() gt_det_data_sample.gt_instances = gt_instances # det_local_visualizer = DetLocalVisualizer() det_local_visualizer.add_datasample('image', image, gt_det_data_sample) # test out_file det_local_visualizer.add_datasample( 'image', image, gt_det_data_sample, out_file=out_file) assert os.path.exists(out_file) drawn_img = cv2.imread(out_file) assert drawn_img.shape == (h, w, 3) os.remove(out_file) # test gt_instances and pred_instances pred_instances = InstanceData() pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w) pred_instances.labels = torch.randint(0, num_class, (num_bboxes, )) pred_instances.scores = torch.rand((num_bboxes, )) pred_det_data_sample = DetDataSample() pred_det_data_sample.pred_instances = pred_instances det_local_visualizer.add_datasample(
52
680
test_add_datasample
22
0
2
42
python/ray/tune/tests/test_integration_wandb.py
139,920
[tune] Move wandb logging directory into trial logdir (#25020) Weights and biases creates a wandb directory to collect intermediate logs and artifacts before uploading them. This directory should be in the respective trial directories. This also means we can re-enable auto resuming.
ray
14
Python
20
test_integration_wandb.py
def testWandbDecoratorConfig(self):
    config = {"par1": 4, "par2": 9.12345678}
    trial = Trial(
        config,
        0,
        "trial_0",
        "trainable",
        PlacementGroupFactory([{"CPU": 1}]),
        "/tmp",
    )
    trial_info = TrialInfo(trial)
f215c8c9887c5b0aa09ba36107391f706c8ddca8
336
https://github.com/ray-project/ray.git
115
def testWandbDecoratorConfig(self): config = {"par1": 4, "par2": 9.12345678} trial = Trial( config, 0, "trial_0", "trainable", P
8
82
testWandbDecoratorConfig
19
0
2
3
apps/authentication/backends/base.py
188,440
Fix rbac (#7713) * fix: token 系统用户增加 protocol * fix: 修复清除orphan session时同时清除对应的 session_task * perf: 修改 connection token api * fix: 修复无法获取系统角色绑定的问题 * perf: 增加 db terminal 及 magnus 组件 * perf: 修改 migrations * fix: 修复AUTHENTICATION_BACKENDS相关的逻辑 * fix: 修改判断backend认证逻辑 * fix: 修复资产账号查看密码跳过mfa * fix: 修复用户组授权权限错误 * feat: 支持COS对象存储 * feat: 升级依赖 jms_storage==0.0.42 * fix: 修复 koko api 问题 * feat: 修改存储翻译信息 * perf: 修改 ticket 权限 * fix: 修复获取资产授权系统用户 get_queryset * perf: 抽取 ticket * perf: 修改 cmd filter 的权限 * fix: 修改 ticket perm * fix: 修复oidc依赖问题 Co-authored-by: Eric <xplzv@126.com> Co-authored-by: ibuler <ibuler@qq.com> Co-authored-by: 小冯 <xiaofeng@xiaofengdeMacBook-Pro.local> Co-authored-by: feng626 <1304903146@qq.com>
jumpserver
9
Python
17
base.py
def user_can_authenticate(self, user):
    is_valid = getattr(user, 'is_valid', None)
    return is_valid or is_valid is None  # allow user to authenticate
03afa4f9743fb8e6892be62a44b19dc48e0ed7f0
24
https://github.com/jumpserver/jumpserver.git
43
def user_can_authenticate(self, user): is_valid = getattr(user, 'is_valid', None) return is_v
5
41
user_can_authenticate
187
0
1
28
sympy/physics/mechanics/tests/test_jointsmethod.py
199,391
Deprecate parent_joint_pos and child_joint_pos
sympy
17
Python
108
test_jointsmethod.py
def test_chaos_pendulum():
    #https://www.pydy.org/examples/chaos_pendulum.html
    mA, mB, lA, lB, IAxx, IBxx, IByy, IBzz, g = symbols('mA, mB, lA, lB, IAxx, IBxx, IByy, IBzz, g')
    theta, phi, omega, alpha = dynamicsymbols('theta phi omega alpha')

    A = ReferenceFrame('A')
    B = ReferenceFrame('B')

    rod = Body('rod', mass=mA, frame=A, central_inertia=inertia(A, IAxx, IAxx, 0))
    plate = Body('plate', mass=mB, frame=B, central_inertia=inertia(B, IBxx, IByy, IBzz))
    C = Body('C')
    with ignore_warnings(SymPyDeprecationWarning):
        J1 = PinJoint('J1', C, rod, coordinates=theta, speeds=omega,
                      child_point=-lA * rod.z, parent_axis=C.y,
                      child_axis=rod.y)
        J2 = PinJoint('J2', rod, plate, coordinates=phi, speeds=alpha,
                      parent_point=(lB - lA) * rod.z, parent_axis=rod.z,
                      child_axis=plate.z)

    rod.apply_force(mA*g*C.z)
    plate.apply_force(mB*g*C.z)

    method = JointsMethod(C, J1, J2)
    method.form_eoms()

    MM = method.mass_matrix
    forcing = method.forcing
    rhs = MM.LUsolve(forcing)
    xd = (-2 * IBxx * alpha * omega * sin(phi) * cos(phi) +
          2 * IByy * alpha * omega * sin(phi) * cos(phi) -
          g * lA * mA * sin(theta) - g * lB * mB * sin(theta)) / (
              IAxx + IBxx * sin(phi)**2 + IByy * cos(phi)**2 +
              lA**2 * mA + lB**2 * mB)
    assert (rhs[0] - xd).simplify() == 0
    xd = (IBxx - IByy) * omega**2 * sin(phi) * cos(phi) / IBzz
    assert (rhs[1] - xd).simplify() == 0
5b90f7d36b8291b61391d457bfad94648af8afe3
403
https://github.com/sympy/sympy.git
370
def test_chaos_pendulum(): #https://www.pydy.org/examples/chaos_pendulum.html mA, mB, lA, lB, IAxx, IBxx,
53
616
test_chaos_pendulum
146
0
6
45
tests/integration/states/test_pkgrepo.py
216,519
various changes and fixes needed to add PhotonOS into CICD.
salt
15
Python
96
test_pkgrepo.py
def test_pkgrepo_05_copr_with_comments(self, grains):
    kwargs = {}
    if grains["os_family"] == "RedHat":
        if (
            grains["osfinger"] == "CentOS Linux-7"
            or grains["osfinger"] == "Amazon Linux-2"
            or grains["os"] == "VMware Photon OS"
        ):
            self.skipTest("copr plugin not installed on Centos 7 CI")
        kwargs = {
            "name": "hello-copr",
            "copr": "mymindstorm/hello",
            "enabled": False,
            "comments": ["This is a comment"],
        }
    else:
        self.skipTest(
            "{}/{} test case needed".format(grains["os_family"], grains["os"])
        )

    try:
        # Run the state to add the repo
        ret = self.run_state("pkgrepo.managed", **kwargs)
        self.assertSaltTrueReturn(ret)

        # Run again with modified comments
        kwargs["comments"].append("This is another comment")
        ret = self.run_state("pkgrepo.managed", **kwargs)
        self.assertSaltTrueReturn(ret)
        ret = ret[next(iter(ret))]
        self.assertEqual(
            ret["changes"],
            {
                "comments": {
                    "old": ["This is a comment"],
                    "new": ["This is a comment", "This is another comment"],
                }
            },
        )

        # Run a third time, no changes should be made
        ret = self.run_state("pkgrepo.managed", **kwargs)
        self.assertSaltTrueReturn(ret)
        ret = ret[next(iter(ret))]
        self.assertFalse(ret["changes"])
        self.assertEqual(
            ret["comment"],
            "Package repo '{}' already configured".format(kwargs["name"]),
        )
    finally:
        # Clean up
        self.run_state("pkgrepo.absent", copr=kwargs["copr"])
00ee5eed1d75417faaaa185e27947b268239698e
254
https://github.com/saltstack/salt.git
757
def test_pkgrepo_05_copr_with_comments(self, grains): kwargs = {} if grains["os_family"] == "RedHat": if ( grains["osfinger"] == "CentOS Linux-7" or grains["osfinger"] == "Amazon Linux-2" or grains["os"] == "VMware Photon OS" ): self.skipTest("copr plugin not installed on Centos 7 CI")
15
465
test_pkgrepo_05_copr_with_comments
42
0
1
3
python/ray/tune/suggest/__init__.py
132,299
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
7
Python
40
__init__.py
def _import_hebo_search():
    from ray.tune.suggest.hebo import HEBOSearch

    return HEBOSearch


SEARCH_ALG_IMPORT = {
    "variant_generator": _import_variant_generator,
    "random": _import_variant_generator,
    "ax": _import_ax_search,
    "dragonfly": _import_dragonfly_search,
    "skopt": _import_skopt_search,
    "hyperopt": _import_hyperopt_search,
    "bayesopt": _import_bayesopt_search,
    "bohb": _import_bohb_search,
    "nevergrad": _import_nevergrad_search,
    "optuna": _import_optuna_search,
    "zoopt": _import_zoopt_search,
    "sigopt": _import_sigopt_search,
    "hebo": _import_hebo_search,
    "blendsearch": _import_blendsearch_search,
    "cfo": _import_cfo_search,
}
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
16
https://github.com/ray-project/ray.git
90
def _import_hebo_search(): from ray.tune.suggest.hebo import HEBOSearch return HEBOSearch SEARCH_ALG_IMPORT = { "variant_generator": _import_variant_generator, "random": _import_variant_generator, "ax": _import_ax_search, "dragonfly": _import_dragonfly_search, "skopt": _import_skopt_search, "hyperopt": _import_hyperopt_search, "bayesopt": _import_bayesopt_search, "bohb": _import_bohb_search, "nevergrad": _import_nevergrad_search, "optuna": _import_optuna_search, "zoopt": _import_zoopt_search, "sigopt": _import_sigopt_search, "hebo": _import_hebo_search, "blendsearch": _import_blendsearch_search, "cfo": _import_cfo_s
20
136
_import_hebo_search
74
0
1
16
ivy_tests/test_core/test_container.py
213,714
renamed dev_str arg to dev for all methods.
ivy
15
Python
40
test_container.py
def test_container_find_sub_structure(dev, call):
    dict_in = {'a': ivy.array([1], dev=dev),
               'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
    top_cont = Container(dict_in)

    # full
    sub_cont = Container({'c': ivy.array([4], dev=dev), 'd': ivy.array([5], dev=dev)})
    assert not top_cont.find_sub_container(sub_cont)
    found_kc = top_cont.find_sub_structure(sub_cont)
    assert found_kc == 'b'
    found_kc = top_cont.find_sub_structure(top_cont)
    assert found_kc == ''

    # partial
    partial_sub_cont = Container({'d': ivy.array([5], dev=dev)})
    found_kc = top_cont.find_sub_structure(partial_sub_cont, partial=True)
    assert found_kc == 'b'
    partial_sub_cont = Container({'b': {'d': ivy.array([5], dev=dev)}})
    found_kc = top_cont.find_sub_structure(partial_sub_cont, partial=True)
    assert found_kc == ''
d743336b1f3654cd0315f380f43eed4116997c1d
211
https://github.com/unifyai/ivy.git
135
def test_container_find_sub_structure(dev, call): dict_in = {'a': ivy.array([1], dev=dev), 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}} top_cont = Container(dict_in) # full sub_cont = Container({'c': ivy.array([4], dev=dev), 'd': ivy.array([5], dev=dev)}) assert not top_cont.find_sub_container(sub_cont) found_kc = top_cont.find_sub_structure(sub_cont) assert found_kc == 'b' found_kc = top_cont.find_sub_structure(top_cont) assert found_kc == '' # partial partial_sub_cont = Container({'d': ivy.array([5], dev=dev)}) found_kc = top_cont.find_sub_structure(partial_sub_cont, partial=True) assert found_kc == 'b' partial_sub_cont = Container({'b
14
346
test_container_find_sub_structure
9
0
1
6
tests/acceptance/test_replay_detail.py
85,952
test(replay): Create Acceptance tests for Replay Details and List pages (#38724) Can run tests individually with ` pytest tests/acceptance/test_replay_detail.py --no-headless=true` More Testing Tips: https://develop.sentry.dev/testing/#acceptance-tests
sentry
10
Python
9
test_replay_detail.py
def test_simple(self):
    with self.feature(FEATURE_NAME):
        self.browser.get(self.path)
        self.browser.wait_until_not('[data-test-id="loading-indicator"]')
        self.browser.wait_until_not('[data-test-id="loading-placeholder"]')
        self.browser.snapshot("replay detail")
0e19363d7def84ab448bc45755d9fe4d7d336fd2
47
https://github.com/getsentry/sentry.git
59
def test_simple(self): with self.feature(FEATURE_NAME): self.brows
9
85
test_simple
38
0
1
14
tests/snuba/api/endpoints/test_group_events.py
88,823
feat(querybuilder): Use query builder in group events endpoint (#41276) - This adds a feature flag where the query buildere will be used so it can be gradually rolled out - this is the last place that get_filter is being used, if this change is successful the entire get_filter function and all it depends on can be deleted
sentry
14
Python
29
test_group_events.py
def test_perf_issue(self):
    event_data = load_data(
        "transaction",
        fingerprint=[f"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1"],
    )
    event_1 = self.store_event(data=event_data, project_id=self.project.id)
    event_2 = self.store_event(data=event_data, project_id=self.project.id)

    self.login_as(user=self.user)

    url = f"/api/0/issues/{event_1.groups[0].id}/events/"
    response = self.do_request(url)

    assert response.status_code == 200, response.content
    assert sorted(map(lambda x: x["eventID"], response.data)) == sorted(
        [str(event_1.event_id), str(event_2.event_id)]
    )
08e022578ad68856f3ae820c68d2b0f4d6dc4f74
124
https://github.com/getsentry/sentry.git
140
def test_perf_issue(self): event_data = load_data( "transaction", fingerprint=[f"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1"], ) event_1 = self.store_event(data=event_data, project_id=self.project.id) event_2 = self.store_event(data=event_data, project_id=self.project.id) self.login_as(user=self.user)
28
218
test_perf_issue
10
0
1
4
rllib/core/optim/tests/test_rl_optimizer.py
137,349
[RLlib] New `RLOptimizer` API for local (torch+tf) optimizers and losses. Used in combination with RLModules. Initial PR. (#29737)
ray
15
Python
10
test_rl_optimizer.py
def input_specs_train(self) -> ModelSpec:
    return ModelSpec(
        dict(self._default_inputs(), **{"actions": TorchTensorSpec("b")}),
    )
ca3d89139afb887a01948106c2bceb7f02a944c0
30
https://github.com/ray-project/ray.git
34
def input_specs_train(self) -> ModelSpec: return ModelSpec( dict(self._default_inputs(), *
6
52
input_specs_train
13
0
1
11
tests/mixed_int8/test_mixed_int8.py
32,960
Supporting seq2seq models for `bitsandbytes` integration (#18579) * Supporting seq2seq models for `bitsandbytes` integration - `bitsandbytes` integration supports now seq2seq models - check if a model has tied weights as an additional check * small modification - tie the weights before looking at tied weights!
transformers
8
Python
10
test_mixed_int8.py
def tearDown(self):
    r
    del self.base_model
    del self.sequence_model
    del self.model_8bit
    del self.seq_to_seq_model

    gc.collect()
    torch.cuda.empty_cache()
a5ca56ff158075351149220319c14dde555a86f5
35
https://github.com/huggingface/transformers.git
61
def tearDown(self): r del self.base_model del self.sequence_model del self.model_8bit del self.se
11
58
tearDown
161
0
16
31
homeassistant/components/light/__init__.py
295,405
Add EntityFeature enum to Light (#69103) Co-authored-by: Paulus Schoutsen <balloob@gmail.com>
core
13
Python
77
__init__.py
def state_attributes(self):
    if not self.is_on:
        return None

    data = {}
    supported_features = self.supported_features
    color_mode = self._light_internal_color_mode

    if color_mode not in self._light_internal_supported_color_modes:
        # Increase severity to warning in 2021.6, reject in 2021.10
        _LOGGER.debug(
            "%s: set to unsupported color_mode: %s, supported_color_modes: %s",
            self.entity_id,
            color_mode,
            self._light_internal_supported_color_modes,
        )

    data[ATTR_COLOR_MODE] = color_mode

    if color_mode in COLOR_MODES_BRIGHTNESS:
        data[ATTR_BRIGHTNESS] = self.brightness
    elif supported_features & SUPPORT_BRIGHTNESS:
        # Backwards compatibility for ambiguous / incomplete states
        # Add warning in 2021.6, remove in 2021.10
        data[ATTR_BRIGHTNESS] = self.brightness

    if color_mode == COLOR_MODE_COLOR_TEMP:
        data[ATTR_COLOR_TEMP] = self.color_temp

    if color_mode in COLOR_MODES_COLOR or color_mode == COLOR_MODE_COLOR_TEMP:
        data.update(self._light_internal_convert_color(color_mode))

    if supported_features & SUPPORT_COLOR_TEMP and not self.supported_color_modes:
        # Backwards compatibility
        # Add warning in 2021.6, remove in 2021.10
        data[ATTR_COLOR_TEMP] = self.color_temp

    if supported_features & SUPPORT_WHITE_VALUE and not self.supported_color_modes:
        # Backwards compatibility
        # Add warning in 2021.6, remove in 2021.10
        data[ATTR_WHITE_VALUE] = self.white_value
        if self.hs_color is not None:
            data.update(self._light_internal_convert_color(COLOR_MODE_HS))

    if supported_features & LightEntityFeature.EFFECT:
        data[ATTR_EFFECT] = self.effect

    return {key: val for key, val in data.items() if val is not None}
ea148a1b8ea611b07b606a4bfef44f66db8b2582
209
https://github.com/home-assistant/core.git
539
def state_attributes(self): if not self.is_on: return None data = {} supported_features = self.supported_features color_mode = self._light_internal_color_mode if color_mode not in self._light_internal_supported_color_modes: # Increase severity to warning in 2021.6, reject in 2021.10 _LOGGER.debug( "%s: set to unsupported color_mode: %s, supported_color_modes: %s", self.entity_id, color_mode, self._ligh
36
334
state_attributes
48
0
2
10
TTS/tts/models/vits.py
262,261
Update Vits model API
TTS
12
Python
31
vits.py
def spec_to_mel(spec, n_fft, num_mels, sample_rate, fmin, fmax):
    global mel_basis
    dtype_device = str(spec.dtype) + "_" + str(spec.device)
    fmax_dtype_device = str(fmax) + "_" + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(sample_rate, n_fft, num_mels, fmin, fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
    mel = torch.matmul(mel_basis[fmax_dtype_device], spec)
    mel = amp_to_db(mel)
    return mel
00c7600103ee34ac50506af88f1b34b713f849e7
112
https://github.com/coqui-ai/TTS.git
86
def spec_to_mel(spec, n_fft, num_mels, sample_rate, fmin, fmax): global mel_basis dtype_device = str(spec.dtype) + "_" + str(spec.device) fmax_dtype_device = str(fmax) + "_" + dtype_device if fmax_dtype_device not in mel_basis: mel = librosa_mel_fn(sample_rate, n_fft, num_mels, fmin, fmax) mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) mel = torch.matmul(mel_basis[fmax_dtype_device], spec) mel = amp_to_db(mel) return mel
20
171
spec_to_mel
95
0
6
20
pandas/core/groupby/groupby.py
166,298
ENH: Add numeric_only to certain groupby ops (#46728)
pandas
14
Python
76
groupby.py
def _python_agg_general(self, func, *args, raise_on_typeerror=False, **kwargs):
    func = com.is_builtin_func(func)
    f = lambda x: func(x, *args, **kwargs)

    # iterate through "columns" ex exclusions to populate output dict
    output: dict[base.OutputKey, ArrayLike] = {}

    if self.ngroups == 0:
        # agg_series below assumes ngroups > 0
        return self._python_apply_general(f, self._selected_obj, is_agg=True)

    for idx, obj in enumerate(self._iterate_slices()):
        name = obj.name

        try:
            # if this function is invalid for this dtype, we will ignore it.
            result = self.grouper.agg_series(obj, f)
        except TypeError:
            if raise_on_typeerror:
                raise
            warn_dropping_nuisance_columns_deprecated(type(self), "agg")
            continue

        key = base.OutputKey(label=name, position=idx)
        output[key] = result

    if not output:
        return self._python_apply_general(f, self._selected_obj)

    return self._wrap_aggregated_output(output)
4a072fa0d0d34e83a0d80b1080846bf708bd7177
167
https://github.com/pandas-dev/pandas.git
332
def _python_agg_general(self, func, *args, raise_on_typeerror=False, **kwargs): func = com.is_builtin_func(func) f = lambda x: func(x, *args, **kwargs) # iterate through "columns" ex exclusions to populate output dict output: dict[base.OutputKey, ArrayLike] = {} if self.ngroups == 0: # agg_series below assumes ngroups > 0 return self._python_apply_general(f, self._selected_obj, is_agg=True) for idx, obj in enumerate(self._iterate_slices()): name = obj.name try: # if this function is invalid for this dtype, we will ignore it. result = self.grouper.agg_series(obj, f) except TypeError: if raise_on_typeerror: raise warn_dropping_nuisance_columns_deprecated(type(self), "agg") continue key = base.OutputKey(label=name, position=idx)
34
260
_python_agg_general
24
0
1
12
test/lib/ansible_test/_internal/ci/local.py
268,096
ansible-test - Avoid use of deprecated type hints. (#78456) * ansible-test - Avoid use of deprecated type hints. PEP 585 deprecated many container types in the `typing` module in favor of the actual types, which support subscripting as of Python 3.9. Conversion of `t.Type` was skipped since PyCharm does not currently recognize it. * ansible-test - Fix `t` and `c` imports/shadowing.
ansible
10
Python
18
local.py
def prepare_core_ci_auth(self) -> dict[str, t.Any]:
    path = self._get_aci_key_path()
    auth_key = read_text_file(path).strip()

    request = dict(
        key=auth_key,
        nonce=None,
    )

    auth = dict(
        remote=request,
    )

    return auth
85acf4d1e55e95c266a35c49f74af3c0f251de08
56
https://github.com/ansible/ansible.git
113
def prepare_core_ci_auth(self) -> dict[str, t.Any]: path =
16
90
prepare_core_ci_auth
41
0
3
20
python/ray/experimental/dag/dag_node.py
138,787
[DAG] add basic plotting on Ray DAGs (#24223) To add basic plotting feature for Ray DAGs. `ray.experimental.dag.plot(dag: DAGNode, to_file=None)` ### Behavior 1. dump the dag plot (Dot) to file. 2. also render the image whenever possible. E.g. if running in Jupyter notebook, the image will not only be saved, but also rendered in the notebook. 3. when to_file is not set (i.e. None), it will be saved to a tempfile for rendering purpose only. This is common when users plot DAGs in notebook env to explore the DAG structure without wanting to save it to a file.
ray
11
Python
35
dag_node.py
def _get_all_child_nodes(self) -> List["DAGNode"]: scanner = _PyObjScanner() # we use List instead of Set here, reason explained # in `_get_toplevel_child_nodes`. children = [] for n in scanner.find_nodes( [ self._bound_args, self._bound_kwargs, self._bound_other_args_to_resolve, ] ): if n not in children: children.append(n) return children
5c06e3f14900e3812061416759c25ff2b88c8a23
57
https://github.com/ray-project/ray.git
190
def _get_all_child_nodes(self) -> List["DAGNode"]: scanner = _PyObjScanner() # we use List instead of Set here, reason explained # in `_get_toplevel_child_nodes`. children = [] for n in scanner.find_nodes( [ self._bound_args, self._bound_kwargs, self._bound_other_arg
12
94
_get_all_child_nodes
9
0
1
3
octavia-cli/octavia_cli/list/listings.py
3,523
🐙 octavia-cli: add command to list existing sources, destinations and connections (#9642)
airbyte
11
Python
9
listings.py
def __repr__(self):
    items = [formatting.format_column_names(self.fields_to_display)] + self.get_listing()
    return formatting.display_as_table(items)
e05dfd1bcdf59f03992b8ff5ce938fdeb9403959
30
https://github.com/airbytehq/airbyte.git
22
def __repr__(self): items = [formatting.format_column_names(self.fields_to_display)] + self.get_listing() return formatting.display_as_table(items)
8
49
__repr__
29
0
1
6
tests/pytests/functional/modules/file/test_replace.py
215,839
Use str on pathlib paths
salt
9
Python
23
test_replace.py
def test_no_backup(file, multiline_file):
    # Backup file should NOT be created
    bak_file = "{}.bak".format(multiline_file)
    assert "Salticus" not in multiline_file.read_text()
    file.replace(str(multiline_file), "Etiam", "Salticus", backup=False)
    assert "Salticus" in multiline_file.read_text()
    assert not os.path.exists(bak_file)
3c8a341d60559867e72ef944ba15d3531e6de383
59
https://github.com/saltstack/salt.git
46
def test_no_backup(file, multiline_file): # Backup file shoul
12
102
test_no_backup
16
0
2
5
d2l/paddle.py
158,027
[Paddle]Add chapter_natural-language-processing-pretraining (#1177) * [PAddle] * add a fuction for chap14 * add show_list_len_pair_hist function for every framework * fix the bug * change torch to paddle * Fix import issue and repeat_interleave issue * Update paddle.py * remove bert pretraining unused comments * restore train ch11 missing code * rerun subward embeddings * Update subword-embedding.md * Update bert.md * Update word2vec-pretraining.md * Update build.yml Co-authored-by: w5688414 <w5688414@gmail.com>
d2l-zh
10
Python
14
paddle.py
def __getitem__(self, tokens):
    indices = [self.token_to_idx.get(token, self.unknown_idx)
               for token in tokens]
    vecs = self.idx_to_vec[d2l.tensor(indices)]
    return vecs
777611a5157e4f2d28598eb8d062f6c1bfd906bf
42
https://github.com/d2l-ai/d2l-zh.git
54
def __getitem__(self, tokens): indices = [self.token_to_idx.get(token, self.unknown_idx) for token in tokens] vecs = self.idx_to_vec
12
63
__getitem__
9
0
1
3
rllib/algorithms/registry.py
137,476
[RLlib] Deprecate (delete) `contrib` folder. (#30992)
ray
9
Python
9
registry.py
def _import_a3c():
    import ray.rllib.algorithms.a3c as a3c

    return a3c.A3C, a3c.A3C.get_default_config()
64d744b4750b749cede563b04c5d32396470a236
26
https://github.com/ray-project/ray.git
14
def _import_a3c(): import ray.rllib.algorithms.a3c as a3c return a3c.A3C, a3c.A
7
40
_import_a3c
14
0
1
4
tests/util/test_network.py
313,294
Add is_ipv4_address and is_ipv6_address utils (#66472)
core
9
Python
8
test_network.py
def test_is_ipv6_address():
    assert network_util.is_ipv6_address("::1") is True
    assert network_util.is_ipv6_address("8.8.8.8") is False
    assert network_util.is_ipv6_address("8.8.8.8") is False
ffcac67d9950f569573a76c6431243c6eb5f1671
32
https://github.com/home-assistant/core.git
26
def test_is_ipv6_address(): assert network_util.is_ipv6_ad
3
60
test_is_ipv6_address
12
0
1
10
python3.10.4/Lib/encodings/hz.py
216,978
add python 3.10.4 for windows
XX-Net
11
Python
12
hz.py
def getregentry():
    return codecs.CodecInfo(
        name='hz',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
8198943edd73a363c266633e1aa5b2a9e9c9f526
46
https://github.com/XX-net/XX-Net.git
66
def getregentry():
15
69
getregentry
22
1
1
5
numpy/lib/tests/test_io.py
159,763
Port over tests from npreadtext test suite - Add test for parsing scientific notation. - Add multiple-char comment test. - Port over tests for structured dtypes. - Add tests for exceptions on skiprows/max_rows. - port over ndmin tests. - Make structured data reusable, add unpack tests. - Port over delimiter tests. - Port over maxrows test w/ various dtypes. - Port over test of exception msg on parse failure. - Port over test for converters w/neg indices. - Port over usecols tests - Port over unicode tests. - Port over more converter tests. - Port over test for large rows. - Port over test for string-len discovery. - Port over float conversion accuracy test. - Port over bool test. - Add test for implicit float->int conversion. - Port over complex parsing tests. - Port over tests for reading from generator. - Port over object cleanup test. - Port over bytes incompat test. - Port over converters tests. Co-authored-by: Warren Weckesser <warren.weckesser@gmail.com> Co-authored-by: Sebastian Berg <sebastian@sipsolutions.net>
numpy
12
Python
21
test_io.py
def test_loadtxt_maxrows_no_blank_lines(dtype):
    txt = TextIO("1.5,2.5\n3.0,4.0\n5.5,6.0")
    res = np.loadtxt(txt, dtype=dtype, delimiter=",", max_rows=2)
    assert_equal(res.dtype, dtype)
    assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype))


@pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2")))
66a61b03658f3c9f312505dcf7eab07e4cf91ac6
@pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2")))
66
https://github.com/numpy/numpy.git
32
def test_loadtxt_maxrows_no_blank_lines(dtype): txt = TextIO("1.5,2.5\n3.0,4.0\n5.
14
154
test_loadtxt_maxrows_no_blank_lines
21
0
2
8
src/streamlink/plugin/api/validate/_validators.py
187,102
plugin.api.validate: turn module into package Turn module into package with multiple logical sub-modules: - Define a public interface in the package's `__init__` module - Split validation schemas, validators and validate logic - schemas: classes which register attributes used by their respective `validate` implementations - validators: functions which can internally call `validate` and which return something that can be validated - validate: singledispatch functions which implement the validation logic for schemas and various other types - Rename validation schemas for better internal references - Rename singledispatch methods Other clean-up work: - Update comments and fix grammar - Add type annotations - Use f-strings - Use `str` instead of the `text` alias - Simplify some code blocks - Rearrange classes and functions - Rephrase certain error messages - Add a few more tests for better code coverage
streamlink
11
Python
19
_validators.py
def validator_url(**attributes) -> Callable[[str], bool]:
    # Convert "http" to AnySchema("http", "https") for convenience
    if attributes.get("scheme") == "http":
        attributes["scheme"] = AnySchema("http", "https")
120c10302381600abb4044083ce0a106b31df8f0
41
https://github.com/streamlink/streamlink.git
37
def validator_url(**attributes) -> Callable[[str], bool]: # Convert "http" to AnySchema("http", "https") for convenience if attributes.get("scheme") == "http": attributes["scheme"] = Any
7
70
validator_url
105
0
1
38
tests/rpc/test_rpc_telegram.py
148,908
fix stake amt
freqtrade
12
Python
84
test_rpc_telegram.py
def test_send_msg_buy_notification(default_conf, mocker, caplog) -> None:
    msg = {
        'type': RPCMessageType.BUY,
        'trade_id': 1,
        'buy_tag': 'buy_signal_01',
        'exchange': 'Binance',
        'pair': 'ETH/BTC',
        'limit': 1.099e-05,
        'order_type': 'limit',
        'stake_amount': 0.01465333,
        'stake_amount_fiat': 0.0,
        'stake_currency': 'BTC',
        'fiat_currency': 'USD',
        'current_rate': 1.099e-05,
        'amount': 1333.3333333333335,
        'open_date': arrow.utcnow().shift(hours=-1)
    }
    telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf)

    telegram.send_msg(msg)
    assert msg_mock.call_args[0][0] \
        == '\N{LARGE BLUE CIRCLE} *Binance:* Buying ETH/BTC (#1)\n' \
           '*Buy Tag:* `buy_signal_01`\n' \
           '*Amount:* `1333.33333333`\n' \
           '*Open Rate:* `0.00001099`\n' \
           '*Current Rate:* `0.00001099`\n' \
           '*Total:* `(0.01465333 BTC, 180.895 USD)`'

    freqtradebot.config['telegram']['notification_settings'] = {'buy': 'off'}
    caplog.clear()
    msg_mock.reset_mock()
    telegram.send_msg(msg)
    msg_mock.call_count == 0
    log_has("Notification 'buy' not sent.", caplog)

    freqtradebot.config['telegram']['notification_settings'] = {'buy': 'silent'}
    caplog.clear()
    msg_mock.reset_mock()

    telegram.send_msg(msg)
    msg_mock.call_count == 1
    msg_mock.call_args_list[0][1]['disable_notification'] is True
60d1e7fc6578e57ebd27ad05b37e4de63e1ed20f
231
https://github.com/freqtrade/freqtrade.git
310
def test_send_msg_buy_notification(default_conf, mocker, caplog) -> None: msg = { 'type': RPCMessageType.BUY, 'trade_id': 1, 'buy_tag': 'buy_signal_01', 'exchange': 'Binance', 'pair': 'ETH/BTC', 'limit': 1.099e-05, 'order_type': 'limit', 'stake_amount': 0.01465333, 'stake_amount_fiat': 0.0, 'stake_currency': 'BTC', 'fiat_currency': 'USD', 'current_rate': 1.099e-05, 'amount': 1333.3333333333335, 'open_date': arrow.utcnow().shift(hours=-1) } telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) telegram.send_msg(msg) assert msg_mock.call_args[0][0] \ == '\N{LARGE BLUE CIRCLE} *Binance:* Buying ETH/BTC (#1)\n' \ '*Buy Tag:* `buy_signal_01`\n' \ '*Amount:* `1333.33333333`\n' \ '*Open Rate:* `0.00001099`\n' \ '*Current Rate:* `0.00001099`\n' \ '*Total:* `(0.01465333 BTC, 180.895 USD)`' freqtradebot.config['telegram']['notification_settings'] = {'buy': 'off'} caplog.clear() msg_mock.reset_mock() telegram.send_msg(msg) msg_mock.call_count == 0 log_has("Notification 'buy' not sent.", caplog) freqtradebot.config['telegram']['notification_settings'] = {'buy': 'silent'} ca
23
387
test_send_msg_buy_notification
10
0
1
3
src/transformers/models/data2vec/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py
35,657
Add Data2Vec (#15507) * Add data2vec model cloned from roberta * Add checkpoint conversion script * Fix copies * Update docs * Add checkpoint conversion script * Remove fairseq data2vec_text script and fix format * Add comment on where to get data2vec_text.py * Remove mock implementation cheat.py and fix style * Fix copies * Remove TF and Flax classes from init * Add back copy from fairseq data2vec_text.py and fix style * Update model name in docs/source/index.mdx to be CamelCase * Revert model name in table to lower-case to get check_table test to pass * Update src/transformers/models/data2vec/__init__.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/data2vec/convert_data2vec_original_pytorch_checkpoint_to_pytorch.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update docs/source/model_doc/data2vec.mdx Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update docs/source/model_doc/data2vec.mdx Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/auto/configuration_auto.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/data2vec/configuration_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update tests/test_modeling_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/data2vec/configuration_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update documentation * Copy-paste Data2VecConfig from BertConfig * Update config checkpoint to point to edugp/data2vec-nlp-base. Fix style and repo-consistency * Update config special tokens to match RoBERTa * Split multiple assertions and add individual error messages * Rename Data2VecModel to Data2VecForTextModel * Add Data2Vec to _toctree.yml * Rename Data2VecEmbeddings to Data2VecForTextEmbeddings * Add initial Data2VecForAudio model (unfinished). Only matching fairseq's implementation up to the feature encoder (before positional encoding). * finish audio model * finish audio file * Update names and fix style, quality and repo consistency * Remove Data2VecAudioForPretraining. Add tests for Data2VecAudio, mimicking the Wav2Vec2 test suite. Fix bias initilization in positional conv layers. Move back configurations for audio and text to separate files. 
* add inputs to logits to data2vec' * correct autio models * correct config auto * correct tok auto * Update utils/tests_fetcher.py * delete unnecessary files * delete unnecessary files * further renaming * make all tests pass * finish * remove useless test file * Update tests/test_modeling_common.py * Update utils/check_repo.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/data2vec/modeling_data2vec_text.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Fix copies * Update docs * Remove fairseq data2vec_text script and fix format * Add comment on where to get data2vec_text.py * Remove mock implementation cheat.py and fix style * Fix copies * Remove TF and Flax classes from init * Add back copy from fairseq data2vec_text.py and fix style * Update model name in docs/source/index.mdx to be CamelCase * Revert model name in table to lower-case to get check_table test to pass * Update documentation * Update src/transformers/models/data2vec/__init__.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/data2vec/convert_data2vec_original_pytorch_checkpoint_to_pytorch.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/auto/configuration_auto.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/data2vec/configuration_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update tests/test_modeling_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/data2vec/configuration_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Copy-paste Data2VecConfig from BertConfig * Update config checkpoint to point to edugp/data2vec-nlp-base. Fix style and repo-consistency * Update config special tokens to match RoBERTa * Split multiple assertions and add individual error messages * Rename Data2VecModel to Data2VecForTextModel * Add Data2Vec to _toctree.yml * Rename Data2VecEmbeddings to Data2VecForTextEmbeddings * Add initial Data2VecForAudio model (unfinished). Only matching fairseq's implementation up to the feature encoder (before positional encoding). 
* finish audio model * finish audio file * add inputs to logits to data2vec' * Update names and fix style, quality and repo consistency * Remove Data2VecAudioForPretraining. Add tests for Data2VecAudio, mimicking the Wav2Vec2 test suite. Fix bias initilization in positional conv layers. Move back configurations for audio and text to separate files. * correct autio models * correct config auto * correct tok auto * delete unnecessary files * delete unnecessary files * Update utils/tests_fetcher.py * further renaming * make all tests pass * finish * remove useless test file * Update tests/test_modeling_common.py * Update utils/check_repo.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/data2vec/modeling_data2vec_text.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Move data2vec tests to new structure * Fix test imports for text tests * Remove fairseq files * Change paper link to arxiv * Modify Data2Vec documentation to reflect that the encoder is not shared across the audio and text models in the current implementation. * Update text model checkpoint to be facebook/data2vec-text-base * Add 'Copy from' statements and update paper links and docs * fix copy from statements * improve copied from * correct more copied from statements * finish copied from stuff * make style * add model to README * add to master Co-authored-by: Eduardo Gonzalez Ponferrada <eduardo@ferrumhealth.com> Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
transformers
9
Python
10
convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py
def access_by_string(module, path):
    names = path.split(".")
    return reduce(getattr, names, module)
df5a4094a6e3f98f2cb2058cdb688fcc3f453220
24
https://github.com/huggingface/transformers.git
15
def access_by_string(module, path): names = path.split(".") return reduce(getattr, names, module)
7
38
access_by_string
14
0
1
5
tests/auth_tests/test_views.py
201,589
Refs #33476 -- Reformatted code with Black.
django
9
Python
13
test_views.py
def test_redirect_param(self):
    self.login()
    url = self.do_redirect_url + "?next=/custom_next/"
    response = self.client.get(url)
    self.assertRedirects(response, "/custom_next/", fetch_redirect_response=False)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
40
https://github.com/django/django.git
49
def test_redirect_param(self): self.login() url = self.do_redirect_url + "?next=/custom_next/" response = self.client.get(url) self.assertRedirects(response, "/custo
10
70
test_redirect_param
6
0
1
3
homeassistant/components/telegram_bot/polling.py
295,360
Refactor telegram_bot polling/webhooks platforms and add tests (#66433) Co-authored-by: Pär Berge <paer.berge@gmail.com>
core
8
Python
6
polling.py
def start_polling(self, event=None):
    _LOGGER.debug("Starting polling")
    self.updater.start_polling()
d7375f1a9c4a69858a65a56bd524f5a78ecab23c
23
https://github.com/home-assistant/core.git
27
def start_polling(self, event=None):
6
42
start_polling
10
0
1
3
tests/backends/postgresql/test_creation.py
201,733
Refs #33476 -- Reformatted code with Black.
django
9
Python
10
test_creation.py
def test_sql_table_creation_suffix_with_encoding(self):
    settings = {"CHARSET": "UTF8"}
    self.check_sql_table_creation_suffix(settings, "WITH ENCODING 'UTF8'")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
20
https://github.com/django/django.git
23
def test_sql_table_creation_suffix_with_encoding(self): settings = {"CHARSET": "UTF8"} self.check_sql_table_creation_suffix(settings, "WITH ENCODING
4
38
test_sql_table_creation_suffix_with_encoding
9
0
1
3
airbyte-integrations/connectors/source-recurly/unit_tests/test_streams.py
4,176
🎉 Recurly Schema Revamp (#9866) * Cleanup Recurly connector schemas * Add more Recurly schemas to the connector - `billing_infos` - `shipping_addresses` - `shipping_methods` - `subscription_changes` * Add Recurly `add-on` resouce * Add Recurly's account notes resource schema * Add unique coupons to Recurly source * Add credit payments to Recurly connector * Add Recurly resources to integration tests configurations * Bump Recurly source version to `0.4.0` * Add `line_items` Recurly resource * Add `line_items` to Recurly documentation * Add missing `line_items` JSON schema * Replace Subscription Change Recurly API call with Subscription `pending_changes` field * Replace Recurly unique coupon codes API call with coupons `unique_coupon` field To avoid the extra API call to import unique coupon calls * Revert "Replace Recurly unique coupon codes API call with coupons `unique_coupon` field" This reverts commit 1c4592d82da3c5e5e0026dda8eb2ed7a896ac5b8. * Add `end_time` parameter to Recurly connector * Order Recurly specs * Set the Recurly `begin_time` and `end_time` to be optional * Add `order` to Recurly `source_spec.yaml` * Add `maxLength` to Recurly source schemas * Set `maxLength` for Recurly Subscription and Transaction `uuid` * Fix Recurly `export_dates` acceptance tests
airbyte
10
Python
9
test_streams.py
def test_billing_infos_client_method_name(self):
    stream = BillingInfos(client=self.client_mock)

    assert stream.client_method_name == "list_billing_infos"
63af98e3b999d4b223237b51472a819915c5a558
21
https://github.com/airbytehq/airbyte.git
22
def test_billing_infos_client_method_name(self): stream = BillingInfos(client=self.client_mock) assert stream.client_meth
7
36
test_billing_infos_client_method_name