ast_errors
stringlengths
0
3.2k
d_id
int64
44
121k
id
int64
70
338k
n_whitespaces
int64
3
14k
path
stringlengths
8
134
n_words
int64
4
4.82k
n_identifiers
int64
1
131
random_cut
stringlengths
16
15.8k
commit_message
stringlengths
2
15.3k
fun_name
stringlengths
1
84
commit_id
stringlengths
40
40
repo
stringlengths
3
28
file_name
stringlengths
5
79
ast_levels
int64
6
31
nloc
int64
1
548
url
stringlengths
31
59
complexity
int64
1
66
token_counts
int64
6
2.13k
n_ast_errors
int64
0
28
vocab_size
int64
4
1.11k
n_ast_nodes
int64
15
19.2k
language
stringclasses
1 value
documentation
dict
code
stringlengths
101
62.2k
23,671
109,612
271
lib/matplotlib/axes/_base.py
77
22
def set_aspect(self, aspect, adjustable=None, anchor=None, share=False): if cbook._str_equal(aspect, 'equal'): aspect = 1 if not cbook._str_equal(aspect, 'auto'): aspect = float(aspect) # raise ValueError if
Update _base.py
set_aspect
9d616615417eac104e12f2915f3fe875177bb2e4
matplotlib
_base.py
14
20
https://github.com/matplotlib/matplotlib.git
10
146
0
50
232
Python
{ "docstring": "\n Set the aspect ratio of the axes scaling, i.e. y/x-scale.\n\n Parameters\n ----------\n aspect : {'auto', 'equal'} or float\n Possible values:\n\n - 'auto': fill the position rectangle with data.\n - 'equal': same as ``aspect=1``, i.e. same scaling for x and y.\n - *float*: The displayed size of 1 unit in y-data coordinates will\n be *aspect* times the displayed size of 1 unit in x-data\n coordinates; e.g. for ``aspect=2`` a square in data coordinates\n will be rendered with a height of twice its width.\n\n adjustable : None or {'box', 'datalim'}, optional\n If not ``None``, this defines which parameter will be adjusted to\n meet the required aspect. See `.set_adjustable` for further\n details.\n\n anchor : None or str or (float, float), optional\n If not ``None``, this defines where the Axes will be drawn if there\n is extra space due to aspect constraints. The most common way to\n to specify the anchor are abbreviations of cardinal directions:\n\n ===== =====================\n value description\n ===== =====================\n 'C' centered\n 'SW' lower left corner\n 'S' middle of bottom edge\n 'SE' lower right corner\n etc.\n ===== =====================\n\n See `~.Axes.set_anchor` for further details.\n\n share : bool, default: False\n If ``True``, apply the settings to all shared Axes.\n\n See Also\n --------\n matplotlib.axes.Axes.set_adjustable\n Set how the Axes adjusts to achieve the required aspect ratio.\n matplotlib.axes.Axes.set_anchor\n Set the position in case of extra space.\n ", "language": "en", "n_whitespaces": 618, "n_words": 219, "vocab_size": 140 }
def set_aspect(self, aspect, adjustable=None, anchor=None, share=False): if cbook._str_equal(aspect, 'equal'): aspect = 1 if not cbook._str_equal(aspect, 'auto'): aspect = float(aspect) # raise ValueError if necessary if aspect<0: raise ValueError("aspect must be positive") if share: axes = {sibling for name in self._axis_names for sibling in self._shared_axes[name].get_siblings(self)} else: axes = [self] for ax in axes: ax._aspect = aspect if adjustable is None: adjustable = self._adjustable self.set_adjustable(adjustable, share=share) # Handle sharing. if anchor is not None: self.set_anchor(anchor, share=share) self.stale = True
50,145
202,525
144
tests/custom_pk/tests.py
59
12
def test_pk_attributes(self): # pk can be used as a substitute for the primary key. # The primary key can be accessed via the pk property on the model. e = Employee.objects.get(pk=123) self.ass
Refs #33476 -- Reformatted code with Black.
test_pk_attributes
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
10
8
https://github.com/django/django.git
1
51
0
44
89
Python
{ "docstring": "\n pk and attribute name are available on the model\n No default id attribute is added\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
def test_pk_attributes(self): # pk can be used as a substitute for the primary key. # The primary key can be accessed via the pk property on the model. e = Employee.objects.get(pk=123) self.assertEqual(e.pk, 123) # Or we can use the real attribute name for the primary key: self.assertEqual(e.employee_code, 123) with self.assertRaisesMessage( AttributeError, "'Employee' object has no attribute 'id'" ): e.id
11,152
54,803
294
src/prefect/client.py
95
12
async def __aenter__(self): if self._closed: # httpx.AsyncClient does not allow reuse so we will not either. raise RuntimeError( "The client cannot be started again after closing. "
Disable lifespan management during logging
__aenter__
05b92d7c7f6cf21c5d6033df7242c331fc66b92e
prefect
client.py
14
16
https://github.com/PrefectHQ/prefect.git
5
80
0
65
145
Python
{ "docstring": "\n Start the client.\n\n If the client is already started, this will raise an exception.\n\n If the client is already closed, this will raise an exception. Use a new client\n instance instead.\n ", "language": "en", "n_whitespaces": 67, "n_words": 31, "vocab_size": 19 }
async def __aenter__(self): if self._closed: # httpx.AsyncClient does not allow reuse so we will not either. raise RuntimeError( "The client cannot be started again after closing. " "Retrieve a new client with `get_client()` instead." ) if self._started: # httpx.AsyncClient does not allow reentrancy so we will not either. raise RuntimeError("The client cannot be started more than once.") await self._exit_stack.__aenter__() # Enter a lifespan context if using an ephemeral application. # See https://github.com/encode/httpx/issues/350 if self._ephemeral_app and self.manage_lifespan: self._ephemeral_lifespan = await self._exit_stack.enter_async_context( app_lifespan_context(self._ephemeral_app) ) # Enter the httpx client's context await self._exit_stack.enter_async_context(self._client) self._started = True return self
31,501
138,659
665
rllib/agents/qmix/qmix.py
224
42
def training_iteration(self) -> ResultDict: # Sample n batches from n workers. new_sample_batches = synchronous_parallel_sample( worker_set=self.workers, concat=False ) for batch in new_sample_batches: # Update counters. self._counters[NUM_ENV_STEPS_SAMPLED] += batch.env_steps() self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps() # Store new samples in the replay buffer. self.local_replay_buffer.add(batch) # Sample n batches from replay buffer until the total number of timesteps # reaches `train_batch_size`. train_batch = sample_min_n_steps_from_buffer( replay_buffer=self.local_replay_buffer, min_steps=self.config["train_batch_size"], count_by_agent_steps=self._by_agent_steps, ) if train_batch is None: return {} # Learn on the training batch. # Use simple optimizer (only for multi-agent or tf-eager; all other # cases should use the multi-GPU optimizer, even if only using 1 GPU) if self.config.get("simple_optimizer") is True: train_results = train_one_step(self, train_batch) else: train_results = multi_gpu_train_one_step(self, train_batch) # TODO: Move training steps counter update outside of `train_one_step()` method. # # Update train step counters. # self._counters[NUM_ENV_STEPS_TRAINED] += train_batch.env_steps() # self._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps() # Update target network every `target_network_update_freq` steps.
[RLlib] QMIX training iteration function and new replay buffer API. (#24164)
training_iteration
627b9f2e888b05434bb67f547b390409f26538e7
ray
qmix.py
13
46
https://github.com/ray-project/ray.git
6
238
0
139
397
Python
{ "docstring": "QMIX training iteration function.\n\n - Sample n MultiAgentBatches from n workers synchronously.\n - Store new samples in the replay buffer.\n - Sample one training MultiAgentBatch from the replay buffer.\n - Learn on the training batch.\n - Update the target network every `target_network_update_freq` steps.\n - Return all collected training metrics for the iteration.\n\n Returns:\n The results dict from executing the training iteration.\n ", "language": "en", "n_whitespaces": 128, "n_words": 61, "vocab_size": 40 }
def training_iteration(self) -> ResultDict: # Sample n batches from n workers. new_sample_batches = synchronous_parallel_sample( worker_set=self.workers, concat=False ) for batch in new_sample_batches: # Update counters. self._counters[NUM_ENV_STEPS_SAMPLED] += batch.env_steps() self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps() # Store new samples in the replay buffer. self.local_replay_buffer.add(batch) # Sample n batches from replay buffer until the total number of timesteps # reaches `train_batch_size`. train_batch = sample_min_n_steps_from_buffer( replay_buffer=self.local_replay_buffer, min_steps=self.config["train_batch_size"], count_by_agent_steps=self._by_agent_steps, ) if train_batch is None: return {} # Learn on the training batch. # Use simple optimizer (only for multi-agent or tf-eager; all other # cases should use the multi-GPU optimizer, even if only using 1 GPU) if self.config.get("simple_optimizer") is True: train_results = train_one_step(self, train_batch) else: train_results = multi_gpu_train_one_step(self, train_batch) # TODO: Move training steps counter update outside of `train_one_step()` method. # # Update train step counters. # self._counters[NUM_ENV_STEPS_TRAINED] += train_batch.env_steps() # self._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps() # Update target network every `target_network_update_freq` steps. cur_ts = self._counters[NUM_ENV_STEPS_SAMPLED] last_update = self._counters[LAST_TARGET_UPDATE_TS] if cur_ts - last_update >= self.config["target_network_update_freq"]: to_update = self.workers.local_worker().get_policies_to_train() self.workers.local_worker().foreach_policy_to_train( lambda p, pid: pid in to_update and p.update_target() ) self._counters[NUM_TARGET_UPDATES] += 1 self._counters[LAST_TARGET_UPDATE_TS] = cur_ts # Update weights and global_vars - after learning on the local worker - on all # remote workers. 
global_vars = { "timestep": self._counters[NUM_ENV_STEPS_SAMPLED], } # Update remote workers' weights and global vars after learning on local worker. with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]: self.workers.sync_weights(global_vars=global_vars) # Return all collected metrics for the iteration. return train_results
@cli.command() @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
72,929
249,457
105
scripts-dev/release.py
27
11
def _announce() -> None: current_version = get_package_version() tag_name = f"v{current_version}" click.echo( f
Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process. (#13483)
_announce
c7b18d9d44c90acfd4ceaec1fa2f8275e03f14af
synapse
release.py
11
31
https://github.com/matrix-org/synapse.git
2
42
1
22
147
Python
{ "docstring": "Generate markdown to announce the release.\nHi everyone. Synapse {current_version} has just been released.\n\n[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) | \\\n[docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \\\n[debs](https://packages.matrix.org/debian/) | \\\n[pypi](https://pypi.org/project/matrix-synapse/{current_version}/)\nAnnounce the RC in\n- #homeowners:matrix.org (Synapse Announcements)\n- #synapse-dev:matrix.org\nAnnounce the release in\n- #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic\n- #synapse:matrix.org (Synapse Admins), bumping the version in the topic\n- #synapse-dev:matrix.org\n- #synapse-package-maintainers:matrix.org\n\nAsk the designated people to do the blog and tweets.", "language": "en", "n_whitespaces": 57, "n_words": 72, "vocab_size": 43 }
def _announce() -> None: current_version = get_package_version() tag_name = f"v{current_version}" click.echo( f ) if "rc" in tag_name: click.echo( ) else: click.echo( ) @cli.command() @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
53,460
212,852
154
PySimpleGUI.py
45
15
def update(self, value=None, visible=None):
Completed switching all elements over to the new way of handling visiblity
update
ed2bc288ff17344f6406c49623036620f18e65bb
PySimpleGUI
PySimpleGUI.py
12
12
https://github.com/PySimpleGUI/PySimpleGUI.git
6
98
0
32
158
Python
{ "docstring": "\n Changes some of the settings for the Output Element. Must call `Window.Read` or `Window.Finalize` prior\n\n Changes will not be visible in your window until you call window.read or window.refresh.\n\n If you change visibility, your element may MOVE. If you want it to remain stationary, use the \"layout helper\"\n function \"pin\" to ensure your element is \"pinned\" to that location in your layout so that it returns there\n when made visible.\n\n :param value: string that will replace current contents of the output area\n :type value: (str)\n :param visible: control visibility of element\n :type visible: (bool)\n ", "language": "en", "n_whitespaces": 171, "n_words": 94, "vocab_size": 67 }
def update(self, value=None, visible=None): if not self._widget_was_created(): # if widget hasn't been created yet, then don't allow return if value is not None: self._TKOut.output.delete('1.0', tk.END) self._TKOut.output.insert(tk.END, value) if visible is False: self._pack_forget_save_settings(self._TKOut.frame) elif visible is True: self._pack_restore_settings(self._TKOut.frame) if visible is not None: self._visible = visible
34,801
150,631
111
freqtrade/freqai/prediction_models/RLPredictionModel.py
39
8
def example(self): result = getattr(self, "_example", None) if result is None: # No example batch was found, so get one from the `.train` dataset result = next(iter(self.train)) # And cache it for next time self._example = result return result
callback function and TDQN model added
example
01232e9a1f8e28e3611e38af3816edb026600767
freqtrade
RLPredictionModel.py
13
6
https://github.com/freqtrade/freqtrade.git
2
39
0
32
68
Python
{ "docstring": "Get and cache an example batch of `inputs, labels` for plotting.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def example(self): result = getattr(self, "_example", None) if result is None: # No example batch was found, so get one from the `.train` dataset result = next(iter(self.train)) # And cache it for next time self._example = result return result
50,635
204,114
151
django/contrib/gis/measure.py
39
7
def unit_attname(cls, unit_str): lower = unit_str.lower() if unit_str in cls.UNITS: return unit_str elif lower in cls.UNITS: return lower elif lower in cls.LALIAS: return cls.LALIAS[lower] else: raise Exception( 'Could not find a unit keyword associated
Refs #33476 -- Reformatted code with Black.
unit_attname
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
measure.py
12
12
https://github.com/django/django.git
4
56
0
28
93
Python
{ "docstring": "\n Retrieve the unit attribute name for the given unit string.\n For example, if the given unit string is 'metre', return 'm'.\n Raise an exception if an attribute cannot be found.\n ", "language": "en", "n_whitespaces": 59, "n_words": 30, "vocab_size": 22 }
def unit_attname(cls, unit_str): lower = unit_str.lower() if unit_str in cls.UNITS: return unit_str elif lower in cls.UNITS: return lower elif lower in cls.LALIAS: return cls.LALIAS[lower] else: raise Exception( 'Could not find a unit keyword associated with "%s"' % unit_str )
30,776
135,932
63
rllib/tests/test_nn_framework_import_errors.py
30
15
def test_dont_import_tf_error(): # Do n
[RLlib] AlgorithmConfigs: Make None a valid value for methods to set properties; Use new `NotProvided` singleton, instead, to indicate no changes wanted on that property. (#30020)
test_dont_import_tf_error
087548031bcf22dd73364b58acb70e61a49f2427
ray
test_nn_framework_import_errors.py
13
6
https://github.com/ray-project/ray.git
2
58
0
28
108
Python
{ "docstring": "Check error being thrown, if tf not installed but configured.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def test_dont_import_tf_error(): # Do not import tf for testing purposes. os.environ["RLLIB_TEST_NO_TF_IMPORT"] = "1" config = ppo.PPOConfig().environment("CartPole-v1") for _ in framework_iterator(config, frameworks=("tf", "tf2")): with pytest.raises(ImportError, match="However, no installation was found"): config.build()
54,870
217,655
78
python3.10.4/Lib/hmac.py
13
9
def _current(self): if self._hmac: return self._hmac else: h = self._outer.copy()
add python 3.10.4 for windows
_current
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
hmac.py
13
7
https://github.com/XX-net/XX-Net.git
2
40
0
11
69
Python
{ "docstring": "Return a hash object for the current state.\n\n To be used only internally with digest() and hexdigest().\n ", "language": "en", "n_whitespaces": 31, "n_words": 17, "vocab_size": 17 }
def _current(self): if self._hmac: return self._hmac else: h = self._outer.copy() h.update(self._inner.digest()) return h
13,559
64,064
12
erpnext/patches/v13_0/delete_old_sales_reports.py
16
8
def delete_links_from_desktop_icons(report): desktop_icons = frappe.db.get_values("Desktop Icon", {"_report": report}, ["name"]) for desktop_icon in desktop_icons: frappe.delete_doc("Desktop Icon", desktop_icon[0])
fix: broken patches (backport #29067) (#29406) * chore: patch fixes (cherry picked from commit 8b5b146f6d2720587a16f78a8d47840be8dca2b7) # Conflicts: # erpnext/patches/v13_0/make_homepage_products_website_items.py * fix: remove desktop icons while deleting sales reports (cherry picked from commit 5f72026cb932d01fc827c382747e996a94b441fd) * refactor: dont ignore dangerous exceptions in patches (cherry picked from commit 0aa1ea8aeb7757592616bd491de98c69fef08854) * fix: make patch kinda idempotent with previous query rerunning would've caused all values to become 0. * chore: conflicts * fix: check type before patching Co-authored-by: Saurabh <saurabh6790@gmail.com> Co-authored-by: Ankush Menat <ankush@frappe.io>
delete_links_from_desktop_icons
f469ec87d94d4639ff4eb99a45496721c4779bf3
erpnext
delete_old_sales_reports.py
11
4
https://github.com/frappe/erpnext.git
2
42
0
15
73
Python
{ "docstring": " Check for one or multiple Desktop Icons and delete ", "language": "en", "n_whitespaces": 10, "n_words": 9, "vocab_size": 9 }
def delete_links_from_desktop_icons(report): desktop_icons = frappe.db.get_values("Desktop Icon", {"_report": report}, ["name"]) for desktop_icon in desktop_icons: frappe.delete_doc("Desktop Icon", desktop_icon[0])
4,761
24,519
143
ppstructure/table/table_master_match.py
64
18
def get_bboxes_list(end2end_result, structure_master_result): # end2end e
add SLANet
get_bboxes_list
ddaa2c2552e19635cd6cdf38619f1f176c358f89
PaddleOCR
table_master_match.py
10
16
https://github.com/PaddlePaddle/PaddleOCR.git
2
93
0
37
159
Python
{ "docstring": "\n This function is use to convert end2end results and structure master results to\n List of xyxy bbox format and List of xywh bbox format\n :param end2end_result: bbox's format is xyxy\n :param structure_master_result: bbox's format is xywh\n :return: 4 kind list of bbox ()\n ", "language": "en", "n_whitespaces": 62, "n_words": 43, "vocab_size": 26 }
def get_bboxes_list(end2end_result, structure_master_result): # end2end end2end_xyxy_list = [] end2end_xywh_list = [] for end2end_item in end2end_result: src_bbox = end2end_item['bbox'] end2end_xyxy_list.append(src_bbox) xywh_bbox = xyxy2xywh(src_bbox) end2end_xywh_list.append(xywh_bbox) end2end_xyxy_bboxes = np.array(end2end_xyxy_list) end2end_xywh_bboxes = np.array(end2end_xywh_list) # structure master src_bboxes = structure_master_result['bbox'] src_bboxes = remove_empty_bboxes(src_bboxes) # structure_master_xywh_bboxes = src_bboxes # xyxy_bboxes = xywh2xyxy(src_bboxes) # structure_master_xyxy_bboxes = xyxy_bboxes structure_master_xyxy_bboxes = src_bboxes xywh_bbox = xyxy2xywh(src_bboxes) structure_master_xywh_bboxes = xywh_bbox return end2end_xyxy_bboxes, end2end_xywh_bboxes, structure_master_xywh_bboxes, structure_master_xyxy_bboxes
3,424
20,557
145
pipenv/patched/notpip/_vendor/pyparsing/core.py
134
46
def autoname_elements() -> None: for name, var in sys._getframe().f_back.f_locals.items(): if isinstance(var, ParserElement) and not var.customName: var.set_name(name) dbl_quoted_string = Combine( Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' ).set_name("string enclosed in double quotes") sgl_quoted_string = Combine( Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" ).set_name("string enclosed in single quotes") quoted_string = Combine( Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" ).set_name("quotedString using single or double quotes") unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") # build list of built-in expressions, for future reference if a global default value # gets updated _builtin_expr
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
autoname_elements
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
core.py
13
8
https://github.com/pypa/pipenv.git
4
45
0
87
339
Python
{ "docstring": "\n Utility to simplify mass-naming of parser elements, for\n generating railroad diagram with named subdiagrams.\n ", "language": "en", "n_whitespaces": 24, "n_words": 14, "vocab_size": 14 }
def autoname_elements() -> None: for name, var in sys._getframe().f_back.f_locals.items(): if isinstance(var, ParserElement) and not var.customName: var.set_name(name) dbl_quoted_string = Combine( Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' ).set_name("string enclosed in double quotes") sgl_quoted_string = Combine( Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" ).set_name("string enclosed in single quotes") quoted_string = Combine( Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" ).set_name("quotedString using single or double quotes") unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") # build list of built-in expressions, for future reference if a global default value # gets updated _builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)] # backward compatibility names tokenMap = token_map conditionAsParseAction = condition_as_parse_action nullDebugAction = null_debug_action sglQuotedString = sgl_quoted_string dblQuotedString = dbl_quoted_string quotedString = quoted_string unicodeString = unicode_string lineStart = line_start lineEnd = line_end stringStart = string_start stringEnd = string_end traceParseAction = trace_parse_action
21,486
102,171
58
tools/test/test_gen_backend_stubs.py
30
4
def test_valid_zero_ops_doesnt_require_backend_dispatch_key(self) -> None: yaml_str = # External codegen on a yaml file with no operators is effectively a no-op,
Revert "Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels" (#69950) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/69950 This reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa. Test Plan: Imported from OSS Reviewed By: albanD Differential Revision: D33113545 Pulled By: bdhirsh fbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288
test_valid_zero_ops_doesnt_require_backend_dispatch_key
bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d
pytorch
test_gen_backend_stubs.py
7
6
https://github.com/pytorch/pytorch.git
1
16
0
27
32
Python
{ "docstring": "\\\nbackend: BAD_XLA\ncpp_namespace: torch_xla\nsupported:", "language": "en", "n_whitespaces": 2, "n_words": 6, "vocab_size": 6 }
def test_valid_zero_ops_doesnt_require_backend_dispatch_key(self) -> None: yaml_str = # External codegen on a yaml file with no operators is effectively a no-op, # so there's no reason to parse the backend self.assert_success_from_gen_backend_stubs(yaml_str)
1,809
9,995
322
tests/distributed/test_remote_peas/test_remote_peas.py
132
41
async def test_pseudo_remote_peas_topologies(gateway, head, worker): worker_port = random_port() head_port = random_port() port_expose = random_port() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' if head == 'remote': pods_addresses = f'{{"pod0": ["{HOST}:{head_port}"]}}' else: pods_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}' # create a single head pea head_pea = _create_head_pea(head, head_port) # create a single worker pea worker_pea = _create_worker_pea(worker, worker_port) # create a single gateway pea gateway_pea = _create_gateway_pea( gateway, graph_description, pods_addresses, port_expose ) with gateway_pea, worker_pea, head_pea: await asyncio.sleep(1.0) # this would be done by the Pod, its adding the worker to the head activate_msg = ControlRequest(command='ACTIVATE') worker_host, worker_port = worker_pea.runtime_ctrl_address.split(':') if head == 'remote': worker_host = __docker_host__ activate_msg.add_related_entity('worker', worker_host, int(worker_port)) assert GrpcConnectionPool.send_request_sync( activate_msg, head_pea.runtime_ctrl_address ) # send requests to the gateway c = Client(host='127.0.0.1', port=port_expose, asyncio=True) responses = c.post(
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * feat(tests): integration tests for peas (#3923) * feat(tests): 
integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase 
timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <bo.wang@jina.ai> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): 
remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. 
* feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * 
fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci 
if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * 
test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <tobias.jacobowitz@posteo.de> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: Deepankar Mahapatro <deepankar.mahapatro@jina.ai> Co-authored-by: bwanglzu <bo.wang@jina.ai> Co-authored-by: AlaeddineAbdessalem <alaeddine-13@live.fr> Co-authored-by: Zhaofeng Miao <522856232@qq.com>
test_pseudo_remote_peas_topologies
933415bfa1f9eb89f935037014dfed816eb9815d
jina
test_remote_peas.py
12
33
https://github.com/jina-ai/jina.git
4
210
0
85
316
Python
{ "docstring": "\n g(l)-h(l)-w(l) - works\n g(l)-h(l)-w(r) - works - head connects to worker via localhost\n g(l)-h(r)-w(r) - works - head (inside docker) connects to worker via dockerhost\n g(l)-h(r)-w(l) - doesn't work remote head need remote worker\n g(r)-... - doesn't work, as distributed parser not enabled for gateway\n After any 1 failure, segfault\n ", "language": "en", "n_whitespaces": 72, "n_words": 50, "vocab_size": 33 }
async def test_pseudo_remote_peas_topologies(gateway, head, worker): worker_port = random_port() head_port = random_port() port_expose = random_port() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' if head == 'remote': pods_addresses = f'{{"pod0": ["{HOST}:{head_port}"]}}' else: pods_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}' # create a single head pea head_pea = _create_head_pea(head, head_port) # create a single worker pea worker_pea = _create_worker_pea(worker, worker_port) # create a single gateway pea gateway_pea = _create_gateway_pea( gateway, graph_description, pods_addresses, port_expose ) with gateway_pea, worker_pea, head_pea: await asyncio.sleep(1.0) # this would be done by the Pod, its adding the worker to the head activate_msg = ControlRequest(command='ACTIVATE') worker_host, worker_port = worker_pea.runtime_ctrl_address.split(':') if head == 'remote': worker_host = __docker_host__ activate_msg.add_related_entity('worker', worker_host, int(worker_port)) assert GrpcConnectionPool.send_request_sync( activate_msg, head_pea.runtime_ctrl_address ) # send requests to the gateway c = Client(host='127.0.0.1', port=port_expose, asyncio=True) responses = c.post( '/', inputs=async_inputs, request_size=1, return_results=True ) response_list = []
1,728
9,848
217
jina/peapods/peas/__init__.py
50
19
async def async_wait_start_success(self): import asyncio _timeout = self.args.timeout_ready if _timeout <= 0: _timeout = None else: _timeout /= 1e3 timeout_ns = 1e9 * _timeout if _timeout else None now = time.time_ns() while timeout_ns is None or time.ti
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * feat(tests): integration tests for peas (#3923) * feat(tests): 
integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase 
timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <bo.wang@jina.ai> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): 
remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. 
* feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * 
fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci 
if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * 
test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <tobias.jacobowitz@posteo.de> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: Deepankar Mahapatro <deepankar.mahapatro@jina.ai> Co-authored-by: bwanglzu <bo.wang@jina.ai> Co-authored-by: AlaeddineAbdessalem <alaeddine-13@live.fr> Co-authored-by: Zhaofeng Miao <522856232@qq.com>
async_wait_start_success
933415bfa1f9eb89f935037014dfed816eb9815d
jina
__init__.py
13
17
https://github.com/jina-ai/jina.git
6
102
0
34
168
Python
{ "docstring": "\n Wait for the `Pea` to start successfully in a non-blocking manner\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
async def async_wait_start_success(self): import asyncio _timeout = self.args.timeout_ready if _timeout <= 0: _timeout = None else: _timeout /= 1e3 timeout_ns = 1e9 * _timeout if _timeout else None now = time.time_ns() while timeout_ns is None or time.time_ns() - now < timeout_ns: if self.ready_or_shutdown.event.is_set(): self._check_failed_to_start() self.logger.debug(__ready_msg__) return else: await asyncio.sleep(0.1) self._fail_start_timeout(_timeout)
23,550
109,359
55
lib/matplotlib/offsetbox.py
16
9
def set_fontsize(self, s=None): if s is None: s = mpl.rcParams["legend.fontsize"] self.prop = FontProperties(size=s) self.stale = True
Get rcParams from mpl
set_fontsize
438d30b227b1fef7e8733578f851e76a8e360f24
matplotlib
offsetbox.py
10
5
https://github.com/matplotlib/matplotlib.git
2
38
0
13
64
Python
{ "docstring": "\n Set the fontsize in points.\n\n If *s* is not given, reset to :rc:`legend.fontsize`.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
def set_fontsize(self, s=None): if s is None: s = mpl.rcParams["legend.fontsize"] self.prop = FontProperties(size=s) self.stale = True
29,858
132,899
233
python/ray/util/actor_pool.py
71
25
def get_next(self, timeout=None): if not s
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
get_next
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
actor_pool.py
12
18
https://github.com/ray-project/ray.git
6
133
0
54
218
Python
{ "docstring": "Returns the next pending result in order.\n\n This returns the next result produced by submit(), blocking for up to\n the specified timeout until it is available.\n\n Returns:\n The next result.\n\n Raises:\n TimeoutError if the timeout is reached.\n\n Examples:\n >>> pool = ActorPool(...)\n >>> pool.submit(lambda a, v: a.double.remote(v), 1)\n >>> print(pool.get_next())\n 2\n ", "language": "en", "n_whitespaces": 159, "n_words": 51, "vocab_size": 41 }
def get_next(self, timeout=None): if not self.has_next(): raise StopIteration("No more results to get") if self._next_return_index >= self._next_task_index: raise ValueError( "It is not allowed to call get_next() after " "get_next_unordered()." ) future = self._index_to_future[self._next_return_index] if timeout is not None: res, _ = ray.wait([future], timeout=timeout) if not res: raise TimeoutError("Timed out waiting for result") del self._index_to_future[self._next_return_index] self._next_return_index += 1 future_key = tuple(future) if isinstance(future, list) else future i, a = self._future_to_actor.pop(future_key) self._return_actor(a) return ray.get(future)
3,716
21,185
259
pipenv/environment.py
44
26
def expand_egg_links(self) -> None: prefixes = [ Path(prefix) for prefix in self.base_paths["libdirs"].split(os.pathsep) if vistir.path.is_in_path(prefix, self.prefix.as_posix()) ] for loc in prefixes: if not loc.exists(): continue for pth in loc.iterdir(): if not pth.suffix == ".egg-link": continue contents = [ vistir.path.normalize_path(line.strip()) for line in pth
Convert type comments to type annotations
expand_egg_links
4b996c0fa85824b323ad9eff3364dbe2213ebb4c
pipenv
environment.py
16
21
https://github.com/pypa/pipenv.git
8
120
0
31
200
Python
{ "docstring": "\n Expand paths specified in egg-link files to prevent pip errors during\n reinstall\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
def expand_egg_links(self) -> None: prefixes = [ Path(prefix) for prefix in self.base_paths["libdirs"].split(os.pathsep) if vistir.path.is_in_path(prefix, self.prefix.as_posix()) ] for loc in prefixes: if not loc.exists(): continue for pth in loc.iterdir(): if not pth.suffix == ".egg-link": continue contents = [ vistir.path.normalize_path(line.strip()) for line in pth.read_text().splitlines() ] pth.write_text("\n".join(contents))
17,698
83,638
56
zerver/tests/test_link_embed.py
22
10
def test_page_with_og(self) -> None:
preview: Use a dataclass for the embed data. This is significantly cleaner than passing around `Dict[str, Any]` all of the time.
test_page_with_og
327ff9ea0f5e4712a34d767fee55a549cc1d3f39
zulip
test_link_embed.py
9
14
https://github.com/zulip/zulip.git
1
46
0
19
79
Python
{ "docstring": "<html>\n <head>\n <meta property=\"og:title\" content=\"The Rock\" />\n <meta property=\"og:type\" content=\"video.movie\" />\n <meta property=\"og:url\" content=\"http://www.imdb.com/title/tt0117500/\" />\n <meta property=\"og:image\" content=\"http://ia.media-imdb.com/images/rock.jpg\" />\n <meta property=\"og:description\" content=\"The Rock film\" />\n </head>\n </html>", "language": "en", "n_whitespaces": 96, "n_words": 27, "vocab_size": 18 }
def test_page_with_og(self) -> None: html = b parser = OpenGraphParser(html, "text/html; charset=UTF-8") result = parser.extract_data() self.assertEqual(result.title, "The Rock") self.assertEqual(result.description, "The Rock film")
48,207
196,831
120
sympy/core/expr.py
26
9
def is_rational_function(self, *syms): if self in _illegal: return False if syms: syms = set(map(sympify, syms)) else: syms = self.free_symbols if not syms: return True return self._eval_is_rational_function(syms)
Moved definition of illegal
is_rational_function
117f9554466e08aa4178137ad65fae1f2d49b340
sympy
expr.py
12
10
https://github.com/sympy/sympy.git
4
50
0
19
83
Python
{ "docstring": "\n Test whether function is a ratio of two polynomials in the given\n symbols, syms. When syms is not given, all free symbols will be used.\n The rational function does not have to be in expanded or in any kind of\n canonical form.\n\n This function returns False for expressions that are \"rational\n functions\" with symbolic exponents. Thus, you should be able to call\n .as_numer_denom() and apply polynomial algorithms to the result for\n expressions for which this returns True.\n\n This is not part of the assumptions system. You cannot do\n Symbol('z', rational_function=True).\n\n Examples\n ========\n\n >>> from sympy import Symbol, sin\n >>> from sympy.abc import x, y\n\n >>> (x/y).is_rational_function()\n True\n\n >>> (x**2).is_rational_function()\n True\n\n >>> (x/sin(y)).is_rational_function(y)\n False\n\n >>> n = Symbol('n', integer=True)\n >>> (x**n + 1).is_rational_function(x)\n False\n\n This function does not attempt any nontrivial simplifications that may\n result in an expression that does not appear to be a rational function\n to become one.\n\n >>> from sympy import sqrt, factor\n >>> y = Symbol('y', positive=True)\n >>> a = sqrt(y**2 + 2*y + 1)/y\n >>> a.is_rational_function(y)\n False\n >>> factor(a)\n (y + 1)/y\n >>> factor(a).is_rational_function(y)\n True\n\n See also is_algebraic_expr().\n\n ", "language": "en", "n_whitespaces": 444, "n_words": 182, "vocab_size": 114 }
def is_rational_function(self, *syms): if self in _illegal: return False if syms: syms = set(map(sympify, syms)) else: syms = self.free_symbols if not syms: return True return self._eval_is_rational_function(syms)
48,713
197,838
39
sympy/polys/numberfields/primes.py
11
14
def reduce_alg_num(self, a): elt = self.ZK.parent.element_from_alg_num(a) red = self.reduce_element(elt) return a.field_element(list(reversed(red.QQ_col.f
Improve `PrimeIdeal` reduction methods.
reduce_alg_num
af44b30d68265acb25340374b648e198fb5570e7
sympy
primes.py
14
4
https://github.com/sympy/sympy.git
1
47
0
10
78
Python
{ "docstring": "\n Reduce an :py:class:`~.AlgebraicNumber` to a \"small representative\"\n modulo this prime ideal.\n\n Parameters\n ==========\n\n elt : :py:class:`~.AlgebraicNumber`\n The element to be reduced.\n\n Returns\n =======\n\n :py:class:`~.AlgebraicNumber`\n The reduced element.\n\n See Also\n ========\n\n reduce_element\n reduce_ANP\n .Submodule.reduce_element\n\n ", "language": "en", "n_whitespaces": 154, "n_words": 33, "vocab_size": 29 }
def reduce_alg_num(self, a): elt = self.ZK.parent.element_from_alg_num(a) red = self.reduce_element(elt) return a.field_element(list(reversed(red.QQ_col.flat())))
118,204
322,610
452
paddlenlp/taskflow/task.py
79
24
def _auto_joiner(self, short_results, input_mapping, is_dict=False): concat_results = [] elem_type = {} if is_dict else [] for k, vs in input_mapping.items(): single_results = elem_type for v in vs: if len(single_results) == 0: single_results = short_results[v] elif isinstance(elem_type, list): single_results.extend(short_results[v]) elif isinstance(elem_type, dict): for sk in single_results.keys(): if isinstance(single_results[sk], str): single_results[sk] += short_results[v][sk] else: single_results[sk].extend(short_results[v][sk]) else: raise ValueError( "Invalid element type, the type of results " "for each element should be list of dict, " "but {} received.".format(t
Update Taskflow word_segmentation and ner tasks (#1666) * Add AutoSplitter & AutoJoiner * codestyle fix * unify auto joiner * add comments * add sentence split mode * update params * add paddle version check * add wordtag for word_segmentation * add wordtag for word_segmentation * add ner-lac and word_segmentation-jieba * add return entities only for ner * fix ci * fix ci * fix ci * fix ci * fix ci * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * fix bugs of dataloader * remove guard * use fast mode for rnn example * Update README.md * Update README.md
_auto_joiner
1e2ee01dade0d4076ba98aa613c3eb150c615abb
PaddleNLP
task.py
21
23
https://github.com/PaddlePaddle/PaddleNLP.git
9
159
0
59
252
Python
{ "docstring": "\n Join the short results automatically and generate the final results to match with the user inputs.\n Args:\n short_results (List[dict] / List[List[str]] / List[str]): input raw texts.\n input_mapping (dict): cutting length.\n is_dict (bool): whether the element type is dict, default to False.\n return:\n short_input_texts (List[str]): the short input texts for model inference.\n ", "language": "en", "n_whitespaces": 124, "n_words": 51, "vocab_size": 42 }
def _auto_joiner(self, short_results, input_mapping, is_dict=False): concat_results = [] elem_type = {} if is_dict else [] for k, vs in input_mapping.items(): single_results = elem_type for v in vs: if len(single_results) == 0: single_results = short_results[v] elif isinstance(elem_type, list): single_results.extend(short_results[v]) elif isinstance(elem_type, dict): for sk in single_results.keys(): if isinstance(single_results[sk], str): single_results[sk] += short_results[v][sk] else: single_results[sk].extend(short_results[v][sk]) else: raise ValueError( "Invalid element type, the type of results " "for each element should be list of dict, " "but {} received.".format(type(single_results))) concat_results.append(single_results) return concat_results
80,043
269,373
125
keras/applications/efficientnet_weight_update_util.py
66
9
def get_variable_names_from_ckpt(path_ckpt, use_ema=True): v_all = tf.train.list_variables(path_ckpt) # keep name only v_name_all = [x[0] for x in v_all] if use_ema: v_name_all = [x for x in v_name_all if "ExponentialMovingAverage" in x] else: v_name_all = [ x for x in v_name_all if "ExponentialMovingAverage" not in x ] # remove util variables used for RMSprop v_
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
get_variable_names_from_ckpt
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
efficientnet_weight_update_util.py
13
11
https://github.com/keras-team/keras.git
9
80
0
32
130
Python
{ "docstring": "Get list of tensor names from checkpoint.\n\n Args:\n path_ckpt: str, path to the ckpt files\n use_ema: Bool, whether to use ExponentialMovingAverage result or not.\n Returns:\n List of variable names from checkpoint.\n ", "language": "en", "n_whitespaces": 55, "n_words": 31, "vocab_size": 26 }
def get_variable_names_from_ckpt(path_ckpt, use_ema=True): v_all = tf.train.list_variables(path_ckpt) # keep name only v_name_all = [x[0] for x in v_all] if use_ema: v_name_all = [x for x in v_name_all if "ExponentialMovingAverage" in x] else: v_name_all = [ x for x in v_name_all if "ExponentialMovingAverage" not in x ] # remove util variables used for RMSprop v_name_all = [x for x in v_name_all if "RMS" not in x] return v_name_all
48,925
198,418
103
sympy/solvers/deutils.py
38
14
def ode_order(expr, func):
Improve loop performance in solvers
ode_order
bd9f607176c58dfba01e27c05c2b7d49ff97c901
sympy
deutils.py
17
11
https://github.com/sympy/sympy.git
6
103
0
26
161
Python
{ "docstring": "\n Returns the order of a given differential\n equation with respect to func.\n\n This function is implemented recursively.\n\n Examples\n ========\n\n >>> from sympy import Function\n >>> from sympy.solvers.deutils import ode_order\n >>> from sympy.abc import x\n >>> f, g = map(Function, ['f', 'g'])\n >>> ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 +\n ... f(x).diff(x), f(x))\n 2\n >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x))\n 2\n >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x))\n 3\n\n ", "language": "en", "n_whitespaces": 119, "n_words": 67, "vocab_size": 46 }
def ode_order(expr, func): a = Wild('a', exclude=[func]) if expr.match(a): return 0 if isinstance(expr, Derivative): if expr.args[0] == func: return len(expr.variables) else: return max(ode_order(arg, func) for arg in expr.args[0].args) + len(expr.variables) else: return max(ode_order(arg, func) for arg in expr.args)
91,035
291,932
866
homeassistant/components/discord/notify.py
170
53
async def async_send_message(self, message, **kwargs): nextcord.VoiceClient.warn_nacl = False discord_bot = nextcord.Client() images = None embedding = None if ATTR_TARGET not in kwargs: _LOGGER.error("No target specified") return None data = kwargs.get(ATTR_DATA) or {} embeds: list[nextcord.Embed] = [] if ATTR_EMBED in data: embedding = data[ATTR_EMBED] fields = embedding.get(ATTR_EMBED_FIELDS) or [] if embedding: embed = nextcord.Embed(**embedding) for field in fields: embed.add_field(**field) if ATTR_EMBED_FOOTER in embedding: embed.set_footer(**embedding[ATTR_EMBED_FOOTER]) if ATTR_EMBED_AUTHOR in embedding: embed.set_author(**embedding[ATTR_EMBED_AUTHOR]) if ATTR_EMBED_THUMBNAIL in embedding: embed.set_thumbnail(**embedding[ATTR_EMBED_THUMBNAIL]) embeds.append(embed) if ATTR_IMAGES in data: images = [] for image in data.get(ATTR_IMAGES, []): image_exists = await self.hass.async_add_executor_job( self.file_exists, image ) if image_exists: images.
Replace discord.py with nextcord (#66540) * Replace discord.py with nextcord * Typing tweak * Another pip check decrease :)
async_send_message
cb03db8df4bf8b50945b36a4b0debcaaed1190a8
core
notify.py
18
51
https://github.com/home-assistant/core.git
19
347
0
102
564
Python
{ "docstring": "Login to Discord, send message to channel(s) and log out.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
async def async_send_message(self, message, **kwargs): nextcord.VoiceClient.warn_nacl = False discord_bot = nextcord.Client() images = None embedding = None if ATTR_TARGET not in kwargs: _LOGGER.error("No target specified") return None data = kwargs.get(ATTR_DATA) or {} embeds: list[nextcord.Embed] = [] if ATTR_EMBED in data: embedding = data[ATTR_EMBED] fields = embedding.get(ATTR_EMBED_FIELDS) or [] if embedding: embed = nextcord.Embed(**embedding) for field in fields: embed.add_field(**field) if ATTR_EMBED_FOOTER in embedding: embed.set_footer(**embedding[ATTR_EMBED_FOOTER]) if ATTR_EMBED_AUTHOR in embedding: embed.set_author(**embedding[ATTR_EMBED_AUTHOR]) if ATTR_EMBED_THUMBNAIL in embedding: embed.set_thumbnail(**embedding[ATTR_EMBED_THUMBNAIL]) embeds.append(embed) if ATTR_IMAGES in data: images = [] for image in data.get(ATTR_IMAGES, []): image_exists = await self.hass.async_add_executor_job( self.file_exists, image ) if image_exists: images.append(image) else: _LOGGER.warning("Image not found: %s", image) await discord_bot.login(self.token) try: for channelid in kwargs[ATTR_TARGET]: channelid = int(channelid) try: channel = await discord_bot.fetch_channel(channelid) except nextcord.NotFound: try: channel = await discord_bot.fetch_user(channelid) except nextcord.NotFound: _LOGGER.warning("Channel not found for ID: %s", channelid) continue # Must create new instances of File for each channel. files = [nextcord.File(image) for image in images] if images else [] await channel.send(message, files=files, embeds=embeds) except (nextcord.HTTPException, nextcord.NotFound) as error: _LOGGER.warning("Communication error: %s", error) await discord_bot.close()
@pytest.fixture( params=[ ( Interval(left=0, right=5, inclusive="right"), IntervalDtype("int64", inclusive="right"), ), ( Interval(left=0.1, right=0.5, inclusive="right"), IntervalDtype("float64", inclusive="right"), ), (Period("2012-01", freq="M"), "period[M]"), (Period("2012-02-01", freq="D"), "period[D]"), ( Timestamp("2011-01-01", tz="US/Eastern"), DatetimeTZDtype(tz="US/Eastern"), ), (Timedelta(seconds=500), "timedelta64[ns]"), ] )
40,065
167,613
290
pandas/conftest.py
78
24
def rand_series_with_duplicate_datetimeindex() -> Series: dates = [ datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 5), ] return Series(np.random.randn(len(dates)), index=dates) # ---------------------------------------------------------------- # Scalars # ---------------------------------------------------------------- @pytest.fixture( params=[ ( Interval(left=0, right=5, inclusive="right"), IntervalDtype("int64", inclusive="right"), ), ( Interval(lef
TYP: misc return type annotations (#47558)
rand_series_with_duplicate_datetimeindex
f538568afc2c76c2d738d32e3544cf9fe6742960
pandas
conftest.py
13
17
https://github.com/pandas-dev/pandas.git
1
120
1
43
360
Python
{ "docstring": "\n Fixture for Series with a DatetimeIndex that has duplicates.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
def rand_series_with_duplicate_datetimeindex() -> Series: dates = [ datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 5), ] return Series(np.random.randn(len(dates)), index=dates) # ---------------------------------------------------------------- # Scalars # ---------------------------------------------------------------- @pytest.fixture( params=[ ( Interval(left=0, right=5, inclusive="right"), IntervalDtype("int64", inclusive="right"), ), ( Interval(left=0.1, right=0.5, inclusive="right"), IntervalDtype("float64", inclusive="right"), ), (Period("2012-01", freq="M"), "period[M]"), (Period("2012-02-01", freq="D"), "period[D]"), ( Timestamp("2011-01-01", tz="US/Eastern"), DatetimeTZDtype(tz="US/Eastern"), ), (Timedelta(seconds=500), "timedelta64[ns]"), ] )
12,285
60,770
18
.venv/lib/python3.8/site-packages/pip/_internal/locations/base.py
9
4
def get_major_minor_version(): # typ
upd; format
get_major_minor_version
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
base.py
9
2
https://github.com/jindongwang/transferlearning.git
1
15
0
9
31
Python
{ "docstring": "\n Return the major-minor version of the current Python as a string, e.g.\n \"3.7\" or \"3.10\".\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 14 }
def get_major_minor_version(): # type: () -> str return "{}.{}".format(*sys.version_info)
70,501
244,731
1,066
tests/test_models/test_dense_heads/test_ssd_head.py
232
61
def test_ssd_head_loss(self): s = 300 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, }] cfg = Config( dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0., ignore_iof_thr=-1, gt_max_assign_all=False), sampler=dict(type='PseudoSampler'), smoothl1_beta=1., allowed_border=-1, pos_weight=-1, neg_pos_ratio=3, debug=False)) ssd_head = SSDHe
Update SSD and PISA-SSD model config
test_ssd_head_loss
9d7511d8c35df1f9c13b17eb770136859bf370be
mmdetection
test_ssd_head.py
15
64
https://github.com/open-mmlab/mmdetection.git
2
471
0
154
677
Python
{ "docstring": "Tests ssd head loss when truth is empty and non-empty.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def test_ssd_head_loss(self): s = 300 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, }] cfg = Config( dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0., ignore_iof_thr=-1, gt_max_assign_all=False), sampler=dict(type='PseudoSampler'), smoothl1_beta=1., allowed_border=-1, pos_weight=-1, neg_pos_ratio=3, debug=False)) ssd_head = SSDHead( num_classes=4, in_channels=(1, 1, 1, 1, 1, 1), stacked_convs=1, feat_channels=1, use_depthwise=True, anchor_generator=dict( type='SSDAnchorGenerator', scale_major=False, input_size=s, basesize_ratio_range=(0.15, 0.9), strides=[8, 16, 32, 64, 100, 300], ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), train_cfg=cfg) # SSD head expects a multiple levels of features per image feats = ( torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0])) for stride in ssd_head.prior_generator.strides) cls_scores, bbox_preds = ssd_head.forward(feats) # Test that empty ground truth encourages the network to # predict background gt_instances = InstanceData() gt_instances.bboxes = torch.empty((0, 4)) gt_instances.labels = torch.LongTensor([]) empty_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances], img_metas) # When there is no truth, cls_loss and box_loss should all be zero. 
empty_cls_loss = sum(empty_gt_losses['loss_cls']) empty_box_loss = sum(empty_gt_losses['loss_bbox']) self.assertEqual( empty_cls_loss.item(), 0, 'there should be no cls loss when there are no true boxes') self.assertEqual( empty_box_loss.item(), 0, 'there should be no box loss when there are no true boxes') # When truth is non-empty then both cls and box loss # should be nonzero for random inputs gt_instances = InstanceData() gt_instances.bboxes = torch.Tensor( [[23.6667, 23.8757, 238.6326, 151.8874]]) gt_instances.labels = torch.LongTensor([2]) one_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances], img_metas) onegt_cls_loss = sum(one_gt_losses['loss_cls']) onegt_box_loss = sum(one_gt_losses['loss_bbox']) self.assertGreater(onegt_cls_loss.item(), 0, 'cls loss should be non-zero') self.assertGreater(onegt_box_loss.item(), 0, 'box loss should be non-zero')
20,129
100,671
394
tools/alignments/jobs.py
160
24
def _legacy_check(self) -> None: if self._min_size > 0 or self._arguments.extract_every_n != 1: logger.warning("This alignments file was generated with the legacy extraction method.") logger.warning("You should run this extraction job, but with 'min_size' set to 0 and " "'extract-every-n' set to 1 to update the alignments file.") logger.warning("You can then re-run this extraction job with your chosen options.") sys.exit(0) maskers = ["components", "extended"] nn_masks = [mask for mask in list(self._alignments.mask_summary) if mask not in maskers] logtype = logger.warning if nn_masks else logger.info logtype("This alignments file was created with the legacy extraction method and will be " "updated.") logtype("Faces will be extracted using the new method and landmarks based masks will be " "regenerated.") if nn_masks: logtype("However, the NN based masks '%s' will be cropped to the legacy extraction " "method, so you may want to run the mask tool to regenerate these " "masks.", "', '".join(nn_masks)) self._mask_pipeline = Extractor(None, None, maskers, multiproce
Alignments tool - Replace 'extract-large' with 'min-size'
_legacy_check
a9908b46f77dc66ac7efe7100ea0eed4b1f2b460
faceswap
jobs.py
12
26
https://github.com/deepfakes/faceswap.git
7
143
0
103
256
Python
{ "docstring": " Check whether the alignments file was created with the legacy extraction method.\n\n If so, force user to re-extract all faces if any options have been specified, otherwise\n raise the appropriate warnings and set the legacy options.\n ", "language": "en", "n_whitespaces": 58, "n_words": 36, "vocab_size": 32 }
def _legacy_check(self) -> None: if self._min_size > 0 or self._arguments.extract_every_n != 1: logger.warning("This alignments file was generated with the legacy extraction method.") logger.warning("You should run this extraction job, but with 'min_size' set to 0 and " "'extract-every-n' set to 1 to update the alignments file.") logger.warning("You can then re-run this extraction job with your chosen options.") sys.exit(0) maskers = ["components", "extended"] nn_masks = [mask for mask in list(self._alignments.mask_summary) if mask not in maskers] logtype = logger.warning if nn_masks else logger.info logtype("This alignments file was created with the legacy extraction method and will be " "updated.") logtype("Faces will be extracted using the new method and landmarks based masks will be " "regenerated.") if nn_masks: logtype("However, the NN based masks '%s' will be cropped to the legacy extraction " "method, so you may want to run the mask tool to regenerate these " "masks.", "', '".join(nn_masks)) self._mask_pipeline = Extractor(None, None, maskers, multiprocess=True) self._mask_pipeline.launch() # Update alignments versioning self._alignments._version = _VERSION # pylint:disable=protected-access
48,106
196,688
18
sympy/stats/crv_types.py
15
6
def FisherZ(name, d1, d2): r return rv(name, FisherZDistribution, (d1, d2)) #------------------------------------------------------------------------------- # Frechet distribution -----
Documentation cleanup 5
FisherZ
9ad8ab9fe58051cf11626ba6654852fcfec60147
sympy
crv_types.py
8
61
https://github.com/sympy/sympy.git
1
24
0
15
36
Python
{ "docstring": "\n Create a Continuous Random Variable with an Fisher's Z distribution.\n\n Explanation\n ===========\n\n The density of the Fisher's Z distribution is given by\n\n .. math::\n f(x) := \\frac{2d_1^{d_1/2} d_2^{d_2/2}} {\\mathrm{B}(d_1/2, d_2/2)}\n \\frac{e^{d_1z}}{\\left(d_1e^{2z}+d_2\\right)^{\\left(d_1+d_2\\right)/2}}\n\n\n .. TODO - What is the difference between these degrees of freedom?\n\n Parameters\n ==========\n\n d1 : `d_1 > 0`\n Degree of freedom.\n d2 : `d_2 > 0`\n Degree of freedom.\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import FisherZ, density\n >>> from sympy import Symbol, pprint\n\n >>> d1 = Symbol(\"d1\", positive=True)\n >>> d2 = Symbol(\"d2\", positive=True)\n >>> z = Symbol(\"z\")\n\n >>> X = FisherZ(\"x\", d1, d2)\n\n >>> D = density(X)(z)\n >>> pprint(D, use_unicode=False)\n d1 d2\n d1 d2 - -- - --\n -- -- 2 2\n 2 2 / 2*z \\ d1*z\n 2*d1 *d2 *\\d1*e + d2/ *e\n -----------------------------------------\n /d1 d2\\\n B|--, --|\n \\2 2 /\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Fisher%27s_z-distribution\n .. [2] http://mathworld.wolfram.com/Fishersz-Distribution.html\n\n ", "language": "en", "n_whitespaces": 459, "n_words": 145, "vocab_size": 98 }
def FisherZ(name, d1, d2): r return rv(name, FisherZDistribution, (d1, d2)) #------------------------------------------------------------------------------- # Frechet distribution ---------------------------------------------------------
6,828
37,529
177
src/transformers/trainer_pt_utils.py
60
17
def find_batch_size(tensors): if isinstance(tensors, (list, tuple)): for t in tensors: result = find_batch_size(t) if result is not None: return result elif isinstance(tensors, Mapping): for key, value in tensors.items(): result = find_batch_size(value) if result is not None: return result elif isinstance(tensors, torch.Tensor): return tensors.shape[0] if len(tensor
Replace dict/BatchEncoding instance checks by Mapping (#17014) * Replace dict/BatchEncoding instance checks by Mapping * Typo
find_batch_size
18df440709f1b19d1c5617c0d987c5ff8fd0915d
transformers
trainer_pt_utils.py
13
15
https://github.com/huggingface/transformers.git
11
126
0
31
192
Python
{ "docstring": "\n Find the first dimension of a tensor in a nested list/tuple/dict of tensors.\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 11 }
def find_batch_size(tensors): if isinstance(tensors, (list, tuple)): for t in tensors: result = find_batch_size(t) if result is not None: return result elif isinstance(tensors, Mapping): for key, value in tensors.items(): result = find_batch_size(value) if result is not None: return result elif isinstance(tensors, torch.Tensor): return tensors.shape[0] if len(tensors.shape) >= 1 else None elif isinstance(tensors, np.ndarray): return tensors.shape[0] if len(tensors.shape) >= 1 else None
6,842
37,632
35
src/transformers/models/yolos/feature_extraction_yolos.py
14
11
def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5): out_logits, raw_masks = outputs.logits, outputs.pred_masks preds = []
Add YOLOS (#16848) * First draft * Add YolosForObjectDetection * Make forward pass work * Add mid position embeddings * Add interpolation of position encodings * Add expected values * Add YOLOS to tests * Add integration test * Support tiny model as well * Support all models in conversion script * Remove mid_pe_size attribute * Make more tests pass * Add model to README and fix config * Add copied from statements * Rename base_model_prefix to vit * Add missing YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP * Apply suggestions from code review * Apply more suggestions from code review * Convert remaining checkpoints * Improve docstrings * Add YolosFeatureExtractor * Add feature extractor to docs * Add corresponding tests * Fix style * Fix docs * Apply suggestion from code review * Fix bad rebase * Fix some more bad rebase * Fix missing character * Improve docs and variable names Co-authored-by: Niels Rogge <nielsrogge@Nielss-MacBook-Pro.local>
post_process_segmentation
1ac698744c4dbdf1495d303246d08ffacdf4f5b8
transformers
feature_extraction_yolos.py
8
16
https://github.com/huggingface/transformers.git
2
196
0
13
51
Python
{ "docstring": "\n Converts the output of [`DetrForSegmentation`] into image segmentation predictions. Only supports PyTorch.\n\n Parameters:\n outputs ([`DetrSegmentationOutput`]):\n Raw outputs of the model.\n target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`):\n Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction.\n threshold (`float`, *optional*, defaults to 0.9):\n Threshold to use to filter out queries.\n mask_threshold (`float`, *optional*, defaults to 0.5):\n Threshold to use when turning the predicted masks into binary values.\n\n Returns:\n `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image\n in the batch as predicted by the model.\n ", "language": "en", "n_whitespaces": 256, "n_words": 101, "vocab_size": 73 }
def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5): out_logits, raw_masks = outputs.logits, outputs.pred_masks preds = []
47,707
196,207
114
sympy/combinatorics/subsets.py
14
12
def iterate_graycode(self, k): unranked_code = GrayCode.unrank(self.superset_size,
Updated import locations
iterate_graycode
498015021131af4dbb07eb110e5badaba8250c7b
sympy
subsets.py
12
5
https://github.com/sympy/sympy.git
1
41
0
14
64
Python
{ "docstring": "\n Helper function used for prev_gray and next_gray.\n It performs ``k`` step overs to get the respective Gray codes.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Subset\n >>> a = Subset([1, 2, 3], [1, 2, 3, 4])\n >>> a.iterate_graycode(3).subset\n [1, 4]\n >>> a.iterate_graycode(-2).subset\n [1, 2, 4]\n\n See Also\n ========\n\n next_gray, prev_gray\n ", "language": "en", "n_whitespaces": 148, "n_words": 49, "vocab_size": 39 }
def iterate_graycode(self, k): unranked_code = GrayCode.unrank(self.superset_size, (self.rank_gray + k) % self.cardinality) return Subset.subset_from_bitlist(self.superset, unranked_code)
81,150
273,879
32
keras/layers/rnn/gru_lstm_utils.py
17
15
def is_sequence_right_padded(mask): max_seq_length = tf.shape(mask)[1] count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1) right_padded_mask = tf.sequence_mask(count_of_true, maxlen=max_seq_length)
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
is_sequence_right_padded
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
gru_lstm_utils.py
11
5
https://github.com/keras-team/keras.git
1
64
0
15
100
Python
{ "docstring": "Check the mask tensor and see if it right padded.\n\n For cuDNN kernel, it uses the sequence length param to skip the tailing\n timestep. If the data is left padded, or not a strict right padding (has\n masked value in the middle of the sequence), then cuDNN kernel won't be work\n properly in those cases.\n\n Left padded data: [[False, False, True, True, True]].\n Right padded data: [[True, True, True, False, False]].\n Mixture of mask/unmasked data: [[True, False, True, False, False]].\n\n Note that for the mixed data example above, the actually data RNN should see\n are those 2 Trues (index 0 and 2), the index 1 False should be ignored and not\n pollute the internal states.\n\n Args:\n mask: the Boolean tensor with shape [batch, timestep]\n\n Returns:\n boolean scalar tensor, whether the mask is strictly right padded.\n ", "language": "en", "n_whitespaces": 184, "n_words": 135, "vocab_size": 93 }
def is_sequence_right_padded(mask): max_seq_length = tf.shape(mask)[1] count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1) right_padded_mask = tf.sequence_mask(count_of_true, maxlen=max_seq_length) return tf.reduce_all(tf.equal(mask, right_padded_mask))
71,154
246,321
385
tests/rest/client/test_third_party_rules.py
49
17
def _send_event_over_federation(self) -> None: body = { "pdus": [ { "sender": self.user_id, "type": EventTypes.Message, "state_key": "", "content": {"body": "hello world", "msgtype": "m.text"}, "room_id": self.room_id, "depth": 0, "origin_server_ts": self.clock.time_msec(),
Tests: replace mocked Authenticator with the real thing (#11913) If we prepopulate the test homeserver with a key for a remote homeserver, we can make federation requests to it without having to stub out the authenticator. This has two advantages: * means that what we are testing is closer to reality (ie, we now have complete tests for the incoming-request-authorisation flow) * some tests require that other objects be signed by the remote server (eg, the event in `/send_join`), and doing that would require a whole separate set of mocking out. It's much simpler just to use real keys.
_send_event_over_federation
c3db7a0b59d48b8872bc24096f9a2467ef35f703
synapse
test_third_party_rules.py
14
25
https://github.com/matrix-org/synapse.git
1
120
0
44
211
Python
{ "docstring": "Send a dummy event over federation and check that the request succeeds.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def _send_event_over_federation(self) -> None: body = { "pdus": [ { "sender": self.user_id, "type": EventTypes.Message, "state_key": "", "content": {"body": "hello world", "msgtype": "m.text"}, "room_id": self.room_id, "depth": 0, "origin_server_ts": self.clock.time_msec(), "prev_events": [], "auth_events": [], "signatures": {}, "unsigned": {}, } ], } channel = self.make_signed_federation_request( method="PUT", path="/_matrix/federation/v1/send/1", content=body, ) self.assertEqual(channel.code, 200, channel.result)
19,876
100,391
88
plugins/train/trainer/_base.py
26
15
def _print_loss(self, loss): output = ", ".join([f"Loss {side}: {side_loss:.5f}" for side, side_loss in zip(("A", "B"), loss)]) timestamp = time.strftime("%H:%M:%S") output = f"[{timestamp}] [#{self._model.iterations:05d}] {output}"
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
_print_loss
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
faceswap
_base.py
14
6
https://github.com/deepfakes/faceswap.git
2
55
0
23
132
Python
{ "docstring": " Outputs the loss for the current iteration to the console.\n\n Parameters\n ----------\n loss: list\n The loss for each side. List should contain 2 ``floats`` side \"a\" in position 0 and\n side \"b\" in position `.\n ", "language": "en", "n_whitespaces": 87, "n_words": 35, "vocab_size": 28 }
def _print_loss(self, loss): output = ", ".join([f"Loss {side}: {side_loss:.5f}" for side, side_loss in zip(("A", "B"), loss)]) timestamp = time.strftime("%H:%M:%S") output = f"[{timestamp}] [#{self._model.iterations:05d}] {output}" print(f"\r{output}", end="")
56,285
221,238
41
python3.10.4/Lib/calendar.py
16
9
def itermonthdays2(self, year, month): for i, d in enumerate(self.itermonthdays(year, mont
add python 3.10.4 for windows
itermonthdays2
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
calendar.py
10
3
https://github.com/XX-net/XX-Net.git
2
37
0
16
57
Python
{ "docstring": "\n Like itermonthdates(), but will yield (day number, weekday number)\n tuples. For days outside the specified month the day number is 0.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 20 }
def itermonthdays2(self, year, month): for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): yield d, i % 7
99,493
300,633
36
tests/helpers/test_template.py
17
11
def test_distance_function_return_none_if_invalid_state(hass): hass.states.async_set("test.object_2", "happy", {"latitude": 10}) tpl = template.Template("{{ distance(states.test.object_2) | round }}", hass) with pytes
Fail template functions when no default specified (#71687)
test_distance_function_return_none_if_invalid_state
4885331509eeffe50f42d76b234996467b06170f
core
test_template.py
10
5
https://github.com/home-assistant/core.git
1
45
0
17
83
Python
{ "docstring": "Test distance function return None if invalid state.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_distance_function_return_none_if_invalid_state(hass): hass.states.async_set("test.object_2", "happy", {"latitude": 10}) tpl = template.Template("{{ distance(states.test.object_2) | round }}", hass) with pytest.raises(TemplateError): tpl.async_render()
elif sys.version_info[:2] >= (3, 7):sys
3,609
20,890
25
pipenv/patched/notpip/_vendor/typing_extensions.py
13
7
def Concatenate(self, parameters): return _con
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
Concatenate
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
typing_extensions.py
7
2
https://github.com/pypa/pipenv.git
1
15
2
13
48
Python
{ "docstring": "Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a\n higher order function which adds, removes or transforms parameters of a\n callable.\n\n For example::\n\n Callable[Concatenate[int, P], int]\n\n See PEP 612 for detailed information.\n ", "language": "en", "n_whitespaces": 78, "n_words": 33, "vocab_size": 32 }
def Concatenate(self, parameters): return _concatenate_getitem(self, parameters) # 3.7-8 elif sys.version_info[:2] >= (3, 7):
26,587
119,349
116
tests/ann_test.py
55
19
def compute_recall(result_neighbors, ground_truth_neighbors) -> float:
[JAX] Move ann.ann_recall back to tests. The function is simple enough for users to implement their own on the host. PiperOrigin-RevId: 430696789
compute_recall
8372b98c4856b6b2363b7bb28abdb4579440a656
jax
ann_test.py
16
25
https://github.com/google/jax.git
5
105
0
37
164
Python
{ "docstring": "Computes the recall of an approximate nearest neighbor search.\n\n Args:\n result_neighbors: int32 numpy array of the shape [num_queries,\n neighbors_per_query] where the values are the indices of the dataset.\n ground_truth_neighbors: int32 numpy array of with shape [num_queries,\n ground_truth_neighbors_per_query] where the values are the indices of the\n dataset.\n\n Returns:\n The recall.\n ", "language": "en", "n_whitespaces": 76, "n_words": 49, "vocab_size": 28 }
def compute_recall(result_neighbors, ground_truth_neighbors) -> float: assert len( result_neighbors.shape) == 2, "shape = [num_queries, neighbors_per_query]" assert len(ground_truth_neighbors.shape ) == 2, "shape = [num_queries, ground_truth_neighbors_per_query]" assert result_neighbors.shape[0] == ground_truth_neighbors.shape[0] gt_sets = [set(np.asarray(x)) for x in ground_truth_neighbors] hits = sum( len(list(x for x in nn_per_q if x.item() in gt_sets[q])) for q, nn_per_q in enumerate(result_neighbors)) return hits / ground_truth_neighbors.size
39,469
163,635
108
pandas/core/arrays/datetimes.py
30
15
def isocalendar(self) -> DataFrame: from pandas import DataFrame values = self._local_timestamps() sarray = fields.build_isocalendar_sarray(values) iso_calendar_df = DataFrame( sarray, columns=["year", "week", "day"], dtype="UInt32" ) if self._hasna: iso_calendar_df.iloc[self._isnan] = None
EA interface: rename ExtensionArray._hasnans to ._hasna (#45519)
isocalendar
a0b40c0f2ad73420a54e48ec4f564b9667e3f452
pandas
datetimes.py
11
44
https://github.com/pandas-dev/pandas.git
2
64
0
26
109
Python
{ "docstring": "\n Returns a DataFrame with the year, week, and day calculated according to\n the ISO 8601 standard.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n with columns year, week and day\n\n See Also\n --------\n Timestamp.isocalendar : Function return a 3-tuple containing ISO year,\n week number, and weekday for the given Timestamp object.\n datetime.date.isocalendar : Return a named tuple object with\n three components: year, week and weekday.\n\n Examples\n --------\n >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)\n >>> idx.isocalendar()\n year week day\n 2019-12-29 2019 52 7\n 2019-12-30 2020 1 1\n 2019-12-31 2020 1 2\n 2020-01-01 2020 1 3\n >>> idx.isocalendar().week\n 2019-12-29 52\n 2019-12-30 1\n 2019-12-31 1\n 2020-01-01 1\n Freq: D, Name: week, dtype: UInt32\n ", "language": "en", "n_whitespaces": 384, "n_words": 108, "vocab_size": 70 }
def isocalendar(self) -> DataFrame: from pandas import DataFrame values = self._local_timestamps() sarray = fields.build_isocalendar_sarray(values) iso_calendar_df = DataFrame( sarray, columns=["year", "week", "day"], dtype="UInt32" ) if self._hasna: iso_calendar_df.iloc[self._isnan] = None return iso_calendar_df
70,002
243,180
264
src/PIL/Image.py
71
17
def putpixel(self, xy, value): if self.readonly: self._copy() self.load() if self.pyaccess: return self.pyaccess.putpixel(xy, value) if ( self.mode in ("P", "PA") and isinstance(value, (list, tuple)) and len(value) in [3, 4] ): # RGB or RGBA value for a P or PA image if self.mode == "PA": alpha = value[3] if len(value) == 4 else 255 valu
Allow RGB and RGBA values for PA image putpixel
putpixel
a37593f004247ebf69d5582524da6dc5143cb023
Pillow
Image.py
14
18
https://github.com/python-pillow/Pillow.git
9
142
0
49
225
Python
{ "docstring": "\n Modifies the pixel at the given position. The color is given as\n a single numerical value for single-band images, and a tuple for\n multi-band images. In addition to this, RGB and RGBA tuples are\n accepted for P and PA images.\n\n Note that this method is relatively slow. For more extensive changes,\n use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`\n module instead.\n\n See:\n\n * :py:meth:`~PIL.Image.Image.paste`\n * :py:meth:`~PIL.Image.Image.putdata`\n * :py:mod:`~PIL.ImageDraw`\n\n :param xy: The pixel coordinate, given as (x, y). See\n :ref:`coordinate-system`.\n :param value: The pixel value.\n ", "language": "en", "n_whitespaces": 191, "n_words": 81, "vocab_size": 60 }
def putpixel(self, xy, value): if self.readonly: self._copy() self.load() if self.pyaccess: return self.pyaccess.putpixel(xy, value) if ( self.mode in ("P", "PA") and isinstance(value, (list, tuple)) and len(value) in [3, 4] ): # RGB or RGBA value for a P or PA image if self.mode == "PA": alpha = value[3] if len(value) == 4 else 255 value = value[:3] value = self.palette.getcolor(value, self) if self.mode == "PA": value = (value, alpha) return self.im.putpixel(xy, value)
99,301
300,441
293
tests/components/template/test_switch.py
55
14
async def test_available_template_with_entities(hass): await setup.async_setup_component( hass, "switch", { "switch": { "platform": "template", "switches": { "test_template_switch": { **OPTIMISTIC_SWITCH_CONFIG,
Tweak template switch tests (#71738)
test_available_template_with_entities
11cc1feb853bcfd9633ebfc44eae142c10a7f983
core
test_switch.py
17
26
https://github.com/home-assistant/core.git
1
123
0
34
224
Python
{ "docstring": "Test availability templates with values from other entities.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
async def test_available_template_with_entities(hass): await setup.async_setup_component( hass, "switch", { "switch": { "platform": "template", "switches": { "test_template_switch": { **OPTIMISTIC_SWITCH_CONFIG, "value_template": "{{ 1 == 1 }}", "availability_template": "{{ is_state('availability_state.state', 'on') }}", } }, } }, ) await hass.async_block_till_done() await hass.async_start() await hass.async_block_till_done() hass.states.async_set("availability_state.state", STATE_ON) await hass.async_block_till_done() assert hass.states.get("switch.test_template_switch").state != STATE_UNAVAILABLE hass.states.async_set("availability_state.state", STATE_OFF) await hass.async_block_till_done() assert hass.states.get("switch.test_template_switch").state == STATE_UNAVAILABLE
84,345
282,896
1,278
bots/etf/tops.py
247
84
def etfs_disc_command(sort=""): # Debug i
Discord bot massive improvement (#1481) * allow logs feature flag * Adding log collection md * upload last log at startup * additions/refractor * refactor * lint/black ++ * disc * TimeRotating Logger and upload to s3 * corrected regex error * makeup for config * logging/disc/sia/etf/++ * append .log before uploading * process to upload logs to s3 * candle ta/etfmcds * fix * ta candles * implement presignedURL * fixed regex * ma's in 1 cmd, delete older files * refactor ta candle * updates * black * moon? * Logger uploader * rotate every hour * only archive if successful * chavis suggestions * windows * ta * commands_dict update * discord tacmds * log_collection error fix * fix * fix * pylint * bb fix * only log filesize * fixes * discord logs * Delete log_collection.md * fixes for other bots on images * bots image upload fix * updated helpers/load candle * more ta cc/housekeeping/refactors/slashcmds * update bots cmds_dict * adjustments to font size/fixes * test fixs/disc earnings * missed a spot * fixes had > revesred * reversed the >< again oops * remove logger branch code blocking tests * black fix * fix missing sources in docstr/daily candle dt tz * load_candle refactor with docstring * moved insiders to disc * Lucas logging changes * Fixing log_collection.md * testing scenario * more ta converted * more ta * Update config_terminal : remove print of verbosity * table cfg/fix matplt/ screener + * fix * what's sleep? 1 more line.. or 2. scr df2img * juan more. fix news 1m chart issue * ticker.upper() fixes * Update log collection * Updating log collection - change tmp folder Co-authored-by: LBolte29 <lbolte@gmx.net> Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: LBolte29 <97528701+LBolte29@users.noreply.github.com> Co-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com> Co-authored-by: didierlopes.eth <dro.lopes@campus.fct.unl.pt>
etfs_disc_command
50cafd500ece43df98e3cf076d81084b2806ea03
OpenBBTerminal
tops.py
18
98
https://github.com/OpenBB-finance/OpenBBTerminal.git
10
599
0
146
945
Python
{ "docstring": "Displays ETF's Top Gainers/Decliners, Most Active [Wall Street Journal]", "language": "en", "n_whitespaces": 9, "n_words": 9, "vocab_size": 9 }
def etfs_disc_command(sort=""): # Debug if cfg.DEBUG: logger.debug("etfs") df_etfs = wsj_model.etf_movers(sort, export=True) if df_etfs.empty: raise Exception("No available data found") df_etfs.set_index(" ", inplace=True) prfx = "Top" if sort == "active": prfx = "Most" title = f"ETF Movers ({prfx} {sort.capitalize()})" dindex = len(df_etfs.index) if dindex > 15: embeds: list = [] # Output i, i2, end = 0, 0, 15 df_pg, embeds_img, images_list = [], [], [] while i < dindex: df_pg = df_etfs.iloc[i:end] df_pg.append(df_pg) fig = df2img.plot_dataframe( df_pg, fig_size=(1200, (40 + (40 * dindex))), col_width=[1, 9, 1.5, 1.5, 1.5, 1.5], tbl_header=cfg.PLT_TBL_HEADER, tbl_cells=cfg.PLT_TBL_CELLS, font=cfg.PLT_TBL_FONT, row_fill_color=cfg.PLT_TBL_ROW_COLORS, paper_bgcolor="rgba(0, 0, 0, 0)", ) fig.update_traces(cells=(dict(align=["left"]))) imagefile = "disc-etfs.png" imagefile = helpers.save_image(imagefile, fig) if cfg.IMAGES_URL or cfg.IMGUR_CLIENT_ID != "REPLACE_ME": image_link = cfg.IMAGES_URL + imagefile images_list.append(imagefile) else: imagefile_save = cfg.IMG_DIR / imagefile uploaded_image = gst_imgur.upload_image( imagefile_save, title="something" ) image_link = uploaded_image.link os.remove(imagefile_save) embeds_img.append( f"{image_link}", ) embeds.append( disnake.Embed( title=title, colour=cfg.COLOR, ), ) i2 += 1 i += 15 end += 15 # Author/Footer for i in range(0, i2): embeds[i].set_author( name=cfg.AUTHOR_NAME, url=cfg.AUTHOR_URL, icon_url=cfg.AUTHOR_ICON_URL, ) embeds[i].set_footer( text=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) i = 0 for i in range(0, i2): embeds[i].set_image(url=embeds_img[i]) i += 1 embeds[0].set_footer(text=f"Page 1 of {len(embeds)}") choices = [ disnake.SelectOption(label="Home", value="0", emoji="🟢"), ] output = { "view": Menu, "title": title, "embed": embeds, "choices": choices, "embeds_img": embeds_img, "images_list": images_list, } else: fig = df2img.plot_dataframe( df_etfs, fig_size=(1200, (40 + (40 * dindex))), 
col_width=[1, 9, 1.5, 1.5, 1.5, 1.5], tbl_header=cfg.PLT_TBL_HEADER, tbl_cells=cfg.PLT_TBL_CELLS, font=cfg.PLT_TBL_FONT, row_fill_color=cfg.PLT_TBL_ROW_COLORS, paper_bgcolor="rgba(0, 0, 0, 0)", ) fig.update_traces(cells=(dict(align=["left"]))) imagefile = helpers.save_image("disc-etfs.png", fig) output = { "title": title, "imagefile": imagefile, } return output
35,584
153,753
75
modin/core/execution/ray/implementations/pandas_on_ray/partitioning/partition.py
15
12
def get(self):
FEAT-#4371: Add logging to Modin (#4372) Co-authored-by: Devin Petersohn <devin.petersohn@gmail.com> Co-authored-by: Mahesh Vashishtha <mvashishtha@users.noreply.github.com> Co-authored-by: Anatoly Myachev <anatoliimyachev@mail.com> Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: Naren Krishna <naren@ponder.io>
get
49fc2cf3733f20ac6cf8a7c61e42ef7aa5cf4b03
modin
partition.py
10
8
https://github.com/modin-project/modin.git
2
50
0
13
101
Python
{ "docstring": "\n Get the object wrapped by this partition out of the Plasma store.\n\n Returns\n -------\n pandas.DataFrame\n The object from the Plasma store.\n ", "language": "en", "n_whitespaces": 68, "n_words": 21, "vocab_size": 16 }
def get(self): logger = get_logger() logger.debug(f"ENTER::Partition.get::{self._identity}") if len(self.call_queue): self.drain_call_queue() result = ray.get(self.oid) logger.debug(f"EXIT::Partition.get::{self._identity}") return result
@pytest.fixture
9,197
47,660
243
tests/sensors/test_external_task_sensor.py
111
36
def dag_bag_ext(): clear_db_runs() dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False) dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None) task_a_0 = EmptyOperator(task_id="task_a_0", dag=dag_0) task_b_0 = ExternalTaskMarker( task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3, dag=dag_0 ) task_a_0 >> task_b_0 dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule_interval=None) task_a_1 = ExternalTaskSensor( task_id="task_a_1", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1 ) task_b_1 = ExternalTaskMarker( task_id="task_b_1", external_dag_id="dag_2", external_task_id="task_a_2", recursion_depth=2, dag=dag_1 ) task_a_1 >> task_b_1 dag_2 = DAG("dag_2", start_date=DEFAULT_DATE, schedule_interval=None) task_a_2 = ExternalTaskSensor( task_id="task_a_2", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2 ) task_b_2 = ExternalTaskMarker( task_id="task_b_2", external_dag_id="dag_3", external_task_id="task_a_3", recursion_depth=1, dag=dag_2 ) task_a_2 >> task_b_2 dag_3 = DAG("dag_3", start_date=DEFAULT_DATE, schedule_interval=None) task_a_3 = ExternalTaskSensor( task_id="task_a_3", external_dag_id=dag_2.
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
dag_bag_ext
49e336ae0302b386a2f47269a6d13988382d975f
airflow
test_external_task_sensor.py
10
35
https://github.com/apache/airflow.git
2
290
1
69
460
Python
{ "docstring": "\n Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies\n set up using ExternalTaskMarker and ExternalTaskSensor.\n\n dag_0: task_a_0 >> task_b_0\n |\n |\n dag_1: ---> task_a_1 >> task_b_1\n |\n |\n dag_2: ---> task_a_2 >> task_b_2\n |\n |\n dag_3: ---> task_a_3 >> task_b_3\n ", "language": "en", "n_whitespaces": 480, "n_words": 45, "vocab_size": 35 }
def dag_bag_ext(): clear_db_runs() dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False) dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None) task_a_0 = EmptyOperator(task_id="task_a_0", dag=dag_0) task_b_0 = ExternalTaskMarker( task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3, dag=dag_0 ) task_a_0 >> task_b_0 dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule_interval=None) task_a_1 = ExternalTaskSensor( task_id="task_a_1", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1 ) task_b_1 = ExternalTaskMarker( task_id="task_b_1", external_dag_id="dag_2", external_task_id="task_a_2", recursion_depth=2, dag=dag_1 ) task_a_1 >> task_b_1 dag_2 = DAG("dag_2", start_date=DEFAULT_DATE, schedule_interval=None) task_a_2 = ExternalTaskSensor( task_id="task_a_2", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2 ) task_b_2 = ExternalTaskMarker( task_id="task_b_2", external_dag_id="dag_3", external_task_id="task_a_3", recursion_depth=1, dag=dag_2 ) task_a_2 >> task_b_2 dag_3 = DAG("dag_3", start_date=DEFAULT_DATE, schedule_interval=None) task_a_3 = ExternalTaskSensor( task_id="task_a_3", external_dag_id=dag_2.dag_id, external_task_id=task_b_2.task_id, dag=dag_3 ) task_b_3 = EmptyOperator(task_id="task_b_3", dag=dag_3) task_a_3 >> task_b_3 for dag in [dag_0, dag_1, dag_2, dag_3]: dag_bag.bag_dag(dag=dag, root_dag=dag) yield dag_bag clear_db_runs() @pytest.fixture
@image_comparison(['constrained_layout4.png'])
22,615
107,160
93
lib/matplotlib/tests/test_constrainedlayout.py
34
16
def test_constrained_layout3(): fig, axs = plt.subplots(2, 2, layout="constrained") for nn, ax in enumerate(axs.flat): pcm = example_pcolor(ax, fontsize=24) if nn == 3: pad = 0.08 else: pad = 0.02 # default fig.colorbar(pcm, ax=ax, pad=pad) @image_comparison(['constraine
ENH: implement and use base layout_engine for more flexible layout.
test_constrained_layout3
ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22
matplotlib
test_constrainedlayout.py
11
9
https://github.com/matplotlib/matplotlib.git
3
74
1
30
127
Python
{ "docstring": "Test constrained_layout for colorbars with subplots", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
def test_constrained_layout3(): fig, axs = plt.subplots(2, 2, layout="constrained") for nn, ax in enumerate(axs.flat): pcm = example_pcolor(ax, fontsize=24) if nn == 3: pad = 0.08 else: pad = 0.02 # default fig.colorbar(pcm, ax=ax, pad=pad) @image_comparison(['constrained_layout4.png'])
40,114
167,771
21
pandas/core/groupby/groupby.py
7
9
def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: return self.grouper.indi
TYP: more return annotations in core/ (#47618) * TYP: more return annotations in core/ * from __future__ import annotations * more __future__
indices
f65417656ba8c59438d832b6e2a431f78d40c21c
pandas
groupby.py
7
5
https://github.com/pandas-dev/pandas.git
1
26
0
7
41
Python
{ "docstring": "\n Dict {group name -> group indices}.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: return self.grouper.indices
12,773
61,950
146
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py
42
13
def get_hash(self, data, hasher=None): if
upd; format
get_hash
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
database.py
12
12
https://github.com/jindongwang/transferlearning.git
3
89
0
25
151
Python
{ "docstring": "\n Get the hash of some data, using a particular hash algorithm, if\n specified.\n\n :param data: The data to be hashed.\n :type data: bytes\n :param hasher: The name of a hash implementation, supported by hashlib,\n or ``None``. Examples of valid values are ``'sha1'``,\n ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and\n ``'sha512'``. If no hasher is specified, the ``hasher``\n attribute of the :class:`InstalledDistribution` instance\n is used. If the hasher is determined to be ``None``, MD5\n is used as the hashing algorithm.\n :returns: The hash of the data. If a hasher was explicitly specified,\n the returned hash will be prefixed with the specified hasher\n followed by '='.\n :rtype: str\n ", "language": "en", "n_whitespaces": 327, "n_words": 104, "vocab_size": 70 }
def get_hash(self, data, hasher=None): if hasher is None: hasher = self.hasher if hasher is None: hasher = hashlib.md5 prefix = '' else: hasher = getattr(hashlib, hasher) prefix = '%s=' % self.hasher digest = hasher(data).digest() digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii') return '%s%s' % (prefix, digest)
30,656
135,565
173
rllib/utils/tests/test_actor_manager.py
63
23
def test_async_call_same_actor_multiple_times(self): actors = [Actor.remote(i, maybe_crash=False) for i in range(4)] manager = FaultTolerantActorManager(actors=actors) # 2 asynchronous call to actor 0. num_of_calls = manager.foreach_actor_async( lambda w: w.call(), h
[RLlib] Introduce FaultTolerantActorManager (#29703) Signed-off-by: Jun Gong <jungong@anyscale.com>
test_async_call_same_actor_multiple_times
d329147ae28c57b290f6b932f9f3044523f67c4e
ray
test_actor_manager.py
11
11
https://github.com/ray-project/ray.git
3
107
0
51
168
Python
{ "docstring": "Test multiple asynchronous remote calls to the same actor.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_async_call_same_actor_multiple_times(self): actors = [Actor.remote(i, maybe_crash=False) for i in range(4)] manager = FaultTolerantActorManager(actors=actors) # 2 asynchronous call to actor 0. num_of_calls = manager.foreach_actor_async( lambda w: w.call(), healthy_only=False, remote_actor_indices=[0, 0], ) self.assertEqual(num_of_calls, 2) # Now, let's actually fetch the results. results = manager.fetch_ready_async_reqs(timeout_seconds=None) # Returns 1 and 2, representing the first and second calls to actor 0. self.assertEqual([r.get() for r in results.ignore_errors()], [1, 2])
20,731
101,313
389
scripts/fsmedia.py
119
19
def _load(self): data = {} if not self._is_extract: if not self.have_alignments_file: return data data = super()._load() return data skip_existing = hasattr(self._args, 'skip_existing') and self._args.skip_existing skip_faces = hasattr(self._args, 'skip_faces') and self._args.skip_faces if not skip_existing and not skip_faces: logge
bugfix: debug landmarks
_load
9e503bdaa2bfe2baaea50ad2e4bf742f309d9d10
faceswap
fsmedia.py
14
24
https://github.com/deepfakes/faceswap.git
15
171
0
73
290
Python
{ "docstring": " Override the parent :func:`~lib.align.Alignments._load` to handle skip existing\n frames and faces on extract.\n\n If skip existing has been selected, existing alignments are loaded and returned to the\n calling script.\n\n Returns\n -------\n dict\n Any alignments that have already been extracted if skip existing has been selected\n otherwise an empty dictionary\n ", "language": "en", "n_whitespaces": 121, "n_words": 49, "vocab_size": 37 }
def _load(self): data = {} if not self._is_extract: if not self.have_alignments_file: return data data = super()._load() return data skip_existing = hasattr(self._args, 'skip_existing') and self._args.skip_existing skip_faces = hasattr(self._args, 'skip_faces') and self._args.skip_faces if not skip_existing and not skip_faces: logger.debug("No skipping selected. Returning empty dictionary") return data if not self.have_alignments_file and (skip_existing or skip_faces): logger.warning("Skip Existing/Skip Faces selected, but no alignments file found!") return data data = super()._load() if skip_faces: # Remove items from alignments that have no faces so they will # be re-detected del_keys = [key for key, val in data.items() if not val["faces"]] logger.debug("Frames with no faces selected for redetection: %s", len(del_keys)) for key in del_keys: if key in data: logger.trace("Selected for redetection: '%s'", key) del data[key] return data
@log_start_end(log=logger)
85,189
285,147
24
openbb_terminal/stocks/discovery/yahoofinance_model.py
9
7
def get_gtech() -> pd.DataFrame: return get_df( "https://finance.y
Fixed bad yfinance urls (#2282)
get_gtech
bd12c203a0585dab6ca3ff81c3b4500e088b41d6
OpenBBTerminal
yahoofinance_model.py
8
11
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
14
1
9
40
Python
{ "docstring": "Get technology stocks with revenue and earnings growth in excess of 25%. [Source: Yahoo Finance]\n\n Returns\n -------\n pd.DataFrame\n Growth technology stocks\n ", "language": "en", "n_whitespaces": 40, "n_words": 21, "vocab_size": 19 }
def get_gtech() -> pd.DataFrame: return get_df( "https://finance.yahoo.com/screener/predefined/growth_technology_stocks" ) @log_start_end(log=logger)
@require_torch
6,455
35,457
100
tests/encoder_decoder/test_modeling_encoder_decoder.py
30
21
def test_bert2gpt2_summarization(self): model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16") model.to(torch_device) tokenizer_in = AutoTokenizer.from_pretrained("bert-base-case
[Test refactor 1/5] Per-folder tests reorganization (#15725) * Per-folder tests reorganization Co-authored-by: sgugger <sylvain.gugger@gmail.com> Co-authored-by: Stas Bekman <stas@stason.org>
test_bert2gpt2_summarization
29c10a41d04f855c433a6cde7797b325651417d2
transformers
test_modeling_encoder_decoder.py
12
11
https://github.com/huggingface/transformers.git
1
89
1
23
162
Python
{ "docstring": "(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which \"369 went to war for the Confederate States and seven for the Union Army,\" the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 \"colonies\" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for \"health and safety incidents\" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. \"The media has labeled us as the 'nation's deadliest fraternity,' \" Cohen said. 
In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. \"As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world,\" Cohen said. Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents.SAS Alpha Epsilon suspended the students, but university president says it's permanent.\\nThe fraternity has had to deal with a string of student deaths since 2010.\\nSAS has more than 200,000 members, many of whom are students.\\nA student died while being forced into excessive alcohol consumption.", "language": "en", "n_whitespaces": 402, "n_words": 403, "vocab_size": 251 }
def test_bert2gpt2_summarization(self): model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16") model.to(torch_device) tokenizer_in = AutoTokenizer.from_pretrained("bert-base-cased") tokenizer_out = AutoTokenizer.from_pretrained("../gpt2") ARTICLE_STUDENTS = EXPECTED_SUMMARY_STUDENTS = input_dict = tokenizer_in(ARTICLE_STUDENTS, return_tensors="pt") output_ids = model.generate(input_dict["input_ids"].to(torch_device)) summary = tokenizer_out.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS]) @require_torch
5,294
30,056
106
saleor/permission/management.py
27
8
def _get_builtin_permissions(opts): # noqa: D205, D212 perms = [] for action in opts.default_permissions: perms.append( ( get_permission_codename(action, opts), "Can %s %s" % (action, opts.verbose_name_raw), ) )
Move create_permission post migrate signal
_get_builtin_permissions
3981ae09888569eafe9cbb3a0c659dd337028fa4
saleor
management.py
13
10
https://github.com/saleor/saleor.git
2
43
0
25
70
Python
{ "docstring": "\n Return (codename, name) for all autogenerated permissions.\n By default, this is ('add', 'change', 'delete', 'view')\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 15 }
def _get_builtin_permissions(opts):  # noqa: D205, D212
    """
    Return (codename, name) for all autogenerated permissions.

    By default, this is ('add', 'change', 'delete', 'view'), driven by
    ``opts.default_permissions``.
    """
    # Build the list in a single comprehension instead of a manual append
    # loop.  %-formatting is kept deliberately: verbose_name_raw may be a
    # lazy proxy, and %s forces str() the same way the original did.
    return [
        (
            get_permission_codename(action, opts),
            "Can %s %s" % (action, opts.verbose_name_raw),
        )
        for action in opts.default_permissions
    ]
121,117
337,787
84
src/accelerate/accelerator.py
16
9
def accumulate(self, model): self._do_sync() if self.sync_gradients: context = contextl
Introduce automatic gradient accumulation wrapper + fix a few test issues (#484) * Have accelerator handle gradient accumulation Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
accumulate
86ce737d7fc94f8000dbd5e13021d0411bb4204a
accelerate
accelerator.py
10
8
https://github.com/huggingface/accelerate.git
2
37
0
14
67
Python
{ "docstring": "\n A context manager that will lightly wrap around and perform gradient accumulation automatically\n\n Args:\n model (`torch.nn.Module`):\n PyTorch Module that was prepared with `Accelerator.prepare`\n ", "language": "en", "n_whitespaces": 71, "n_words": 23, "vocab_size": 22 }
def accumulate(self, model):
    """
    A context manager that will lightly wrap around and perform gradient
    accumulation automatically.

    Args:
        model (`torch.nn.Module`):
            PyTorch Module that was prepared with `Accelerator.prepare`
    """
    self._do_sync()
    # Sync gradients only when the accumulation step says so; otherwise
    # enter the model's no-sync context.
    context = contextlib.nullcontext if self.sync_gradients else self.no_sync
    with context(model):
        yield
56,951
223,525
118
python3.10.4/Lib/email/_header_value_parser.py
48
13
def get_attribute(value): attribute = Attribute() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) if value and value[0] in ATTRIBUTE_ENDS: raise errors.H
add python 3.10.4 for windows
get_attribute
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
_header_value_parser.py
12
14
https://github.com/XX-net/XX-Net.git
7
99
0
25
163
Python
{ "docstring": " [CFWS] 1*attrtext [CFWS]\n\n This version of the BNF makes the CFWS explicit, and as usual we use a\n value terminal for the actual run of characters. The RFC equivalent of\n attrtext is the token characters, with the subtraction of '*', \"'\", and '%'.\n We include tab in the excluded set just as we do for token.\n\n ", "language": "en", "n_whitespaces": 73, "n_words": 56, "vocab_size": 43 }
def get_attribute(value):
    """ [CFWS] 1*attrtext [CFWS]

    This version of the BNF makes the CFWS explicit, and as usual we use a
    value terminal for the actual run of characters.  The RFC equivalent of
    attrtext is the token characters, with the subtraction of '*', "'", and
    '%'.  We include tab in the excluded set just as we do for token.
    """
    attr = Attribute()
    # Optional leading folding whitespace / comments.
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        attr.append(token)
    # A terminator here means there is no attrtext at all.
    if value and value[0] in ATTRIBUTE_ENDS:
        raise errors.HeaderParseError(
            "expected token but found '{}'".format(value))
    token, value = get_attrtext(value)
    attr.append(token)
    # Optional trailing folding whitespace / comments.
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        attr.append(token)
    return attr, value
@pytest.fixture
35,948
154,378
282
modin/pandas/test/test_io.py
82
21
def eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs): with ensure_clean_dir() as dirname: unique_filename_modin = get_unique_filename( extension=extension, data_dir=dirname ) unique_filename_pandas = get_unique_filename( extension=extension, data_dir=dirname ) # parameter `max_retries=0` is set for `to_csv` function on Ray engine, # in order to increase the stability of tests, we repeat the call of # the entire function manually last_exception = None for _ in range(3): try: getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs) except EXCEPTIONS as exc: last_exception = exc continue break else: raise last_exception getattr(pandas_obj, fn)(unique_
TEST-#4879: Use pandas `ensure_clean()` in place of `io_tests_data` (#4881) Signed-off-by: Karthik Velayutham <vkarthik@ponder.io>
eval_to_file
5086a9ea37bc37e6e58da0ceaf5864b16cc8e0ed
modin
test_io.py
14
20
https://github.com/modin-project/modin.git
3
104
1
63
176
Python
{ "docstring": "Helper function to test `to_<extension>` methods.\n\n Args:\n modin_obj: Modin DataFrame or Series to test `to_<extension>` method.\n pandas_obj: Pandas DataFrame or Series to test `to_<extension>` method.\n fn: name of the method, that should be tested.\n extension: Extension of the test file.\n ", "language": "en", "n_whitespaces": 74, "n_words": 40, "vocab_size": 27 }
def eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs): with ensure_clean_dir() as dirname: unique_filename_modin = get_unique_filename( extension=extension, data_dir=dirname ) unique_filename_pandas = get_unique_filename( extension=extension, data_dir=dirname ) # parameter `max_retries=0` is set for `to_csv` function on Ray engine, # in order to increase the stability of tests, we repeat the call of # the entire function manually last_exception = None for _ in range(3): try: getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs) except EXCEPTIONS as exc: last_exception = exc continue break else: raise last_exception getattr(pandas_obj, fn)(unique_filename_pandas, **fn_kwargs) assert assert_files_eq(unique_filename_modin, unique_filename_pandas) @pytest.fixture
50,522
203,731
70
django/contrib/contenttypes/fields.py
16
9
def _is_matching_generic_foreign_key(self, field): return ( isinstance(field, GenericForeignKey) and field.ct_field == self.content_type_field_name and field.fk_field == self.object_id_field_name )
Refs #33476 -- Reformatted code with Black.
_is_matching_generic_foreign_key
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
fields.py
10
6
https://github.com/django/django.git
3
33
0
14
52
Python
{ "docstring": "\n Return True if field is a GenericForeignKey whose content type and\n object id fields correspond to the equivalent attributes on this\n GenericRelation.\n ", "language": "en", "n_whitespaces": 51, "n_words": 22, "vocab_size": 22 }
def _is_matching_generic_foreign_key(self, field):
    """
    Return True if field is a GenericForeignKey whose content type and
    object id fields correspond to the equivalent attributes on this
    GenericRelation.
    """
    # Guard clause: anything other than a GenericForeignKey can never match.
    if not isinstance(field, GenericForeignKey):
        return False
    return (
        field.ct_field == self.content_type_field_name
        and field.fk_field == self.object_id_field_name
    )
20,628
101,207
102
lib/align/alignments.py
25
13
def hashes_to_frame(self): if not self._hashes_to_frame: logger.debug("Generating hashes to frame") for frame_name, val in self._data.items(): for idx, face in enumerate(val["faces"]): sel
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
hashes_to_frame
5e73437be47f2410439a3c6716de96354e6a0c94
faceswap
alignments.py
17
7
https://github.com/deepfakes/faceswap.git
4
67
0
23
112
Python
{ "docstring": " dict: The SHA1 hash of the face mapped to the frame(s) and face index within the frame\n that the hash corresponds to. The structure of the dictionary is:\n\n {**SHA1_hash** (`str`): {**filename** (`str`): **face_index** (`int`)}}.\n\n Notes\n -----\n This method is depractated and exists purely for updating legacy hash based alignments\n to new png header storage in :class:`lib.align.update_legacy_png_header`.\n\n The first time this property is referenced, the dictionary will be created and cached.\n Subsequent references will be made to this cached dictionary.\n ", "language": "en", "n_whitespaces": 143, "n_words": 79, "vocab_size": 58 }
def hashes_to_frame(self):
    """dict: SHA1 face hash mapped to ``{filename: face_index}``.

    Built lazily on first access and cached for subsequent calls.
    Exists for updating legacy hash-based alignments.
    """
    if not self._hashes_to_frame:
        logger.debug("Generating hashes to frame")
        for filename, frame in self._data.items():
            for face_index, face in enumerate(frame["faces"]):
                # Group all (frame, index) locations sharing the same hash.
                frames = self._hashes_to_frame.setdefault(face["hash"], {})
                frames[filename] = face_index
    return self._hashes_to_frame
117,338
320,770
466
qutebrowser/completion/completiondelegate.py
90
55
def _get_textdoc(self, index): assert self._opt is not None # FIXME we probably should do eliding here. See # qcommonstyle.cpp:viewItemDrawText # https://github.com/qutebrowser/qutebrowser/issues/118 text_option = QTextOption() if self._opt.features & QStyleOptionViewItem.WrapText: text_option.setWrapMode(QTextOption.WordWrap) else: text_option.setWrapMode(QTextOption.ManualWrap) text_option.setTextDirection(self._opt.direction) text_option.setAlignment(QStyle.visualAlignment( self._opt.direction, self._opt.displayAlignment)) if self._doc is not None: self._doc.deleteLater() self._doc = QTextDocument(self) self._doc.setDefaultFont(self._opt.font) self._doc.setDefaultTextOption(text_option) self._doc.setDocumentMargin(2) if index.parent().isValid(): view = self.parent() assert isinstance(view, completionwidget.CompletionView), view pattern = view.pattern columns_to_filter = index.model().columns_to_filter(index) if index.column() in columns_to_filter and pattern: if self._opt.state & QStyle.State_Selected: color = config.val.colors.completion.item.selected.match.fg else: color = config.val.colors.completion.match.fg _Highlighter(self._doc, pattern, color) self._doc.setPlainText(self._opt.text) else: self._doc.setHtml( '<span style="font: {};">{}</span>'.
mypy: Upgrade to PyQt5-stubs 5.15.6.0 For some unknown reason, those new stubs cause a *lot* of things now to be checked by mypy which formerly probably got skipped due to Any being implied somewhere. The stubs themselves mainly improved, with a couple of regressions too. In total, there were some 337 (!) new mypy errors. This commit fixes almost all of them, and the next commit improves a fix to get things down to 0 errors again. Overview of the changes: ==== qutebrowser/app.py - Drop type ignore due to improved stubs. ==== qutebrowser/browser/browsertab.py - Specify the type of _widget members more closely than just QWidget. This is debatable: I suppose the abstract stuff shouldn't need to know anything about the concrete backends at all. But it seems like we cut some corners when initially implementing things, and put some code in browsertab.py just because the APIs of both backends happened to be compatible. Perhaps something to reconsider once we drop QtWebKit and hopefully implement a dummy backend. - Add an additional assertion in AbstractAction.run_string. This is already covered by the isinstance(member, self.action_base) above it, but that's too dynamic for mypy to understand. - Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x and y components), not a single int. - Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x and y components), not a single int. - Fix the argument types of AbstractScroller.to_perc, as it's possible to pass fractional percentages too. - Specify the type for AbstractHistoryPrivate._history. See above (_widget) re this being debatable. - Fix the return type of AbstractTabPrivate.event_target(), which can be None (see #3888). - Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS return value), not None. - Fix the argument type for AbstractTabPrivate.toggle_inspector: position can be None to use the last used position. 
- Declare the type of sub-objects of AbstractTab. - Fix the return value of AbstractTab.icon(), which is the QIcon, not None. ==== qutebrowser/browser/commands.py - Make sure the active window is a MainWindow (with a .win_id attribute). ==== qutebrowser/browser/downloadview.py - Add _model() which makes sure that self.model() is a DownloadModel, not None or any other model. This is needed because other methods access a variety of custom attributes on it, e.g. last_index(). ==== qutebrowser/browser/greasemonkey.py - Add an ignore for AbstractDownload.requested_url which we patch onto the downloads. Probably would be nicer to add it as a proper attribute which always gets set by the DownloadManager. ==== qutebrowser/browser/hints.py - Remove type ignores for QUrl.toString(). - Add a new type ignore for combining different URL flags (which works, but is not exactly type safe... still probably a regression in the stubs). - Make sure the things we get back from self._get_keyparser are what we actually expect. Probably should introduce a TypedDict (and/or overloads for _get_keyparser with typing.Literal) to teach mypy about the exact return value. See #7098. This is needed because we access Hint/NormalKeyParser-specific attributes such as .set_inhibited_timout() or .update_bindings(). ==== qutebrowser/browser/inspector.py - Similar changes than in browsertab.py to make some types where we share API (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next commit. ==== qutebrowser/browser/network/pac.py - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/qtnetworkdownloads.py - Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an AbstractDownload), so that we can call ._uses_nam() on it. ==== qutebrowser/browser/qutescheme.py - Remove now unneeded type ignore for QUrl flags. 
==== qutebrowser/browser/urlmarks.py - Specify the type of UrlMarkManager._lineparser, as those only get initialized in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist. ==== qutebrowser/browser/webelem.py - New casts to turn single KeyboardModifier (enum) entries into KeyboardModifiers (flags). Might not be needed anymore with Qt 6. - With that, casting the final value is now unneeded. ==== qutebrowser/browser/webengine/notification.py - Remove now unneeded type ignore for signal. - Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished() is a QProcess, not just any QObject. ==== qutebrowser/browser/webengine/webenginedownloads.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webengine/webengineelem.py - Specify the type of WebEngineElement._tab. - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webengineinspector.py - See changes to inspector.py and next commit. - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/webengine/webenginequtescheme.py - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webenginesettings.py - Ignore access of .setter attribute which we patch onto QWebEngineProfile. Would be nice to have a subclass or wrapper-class instead. ==== qutebrowser/browser/webengine/webenginetab.py - Specified the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Remove some now-unneeded type ignores for creating FindFlags. - Specify more concrete types for WebEngineTab members where we actually need to access WebEngine-specific attributes. - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webengine/webview.py - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. 
This is needed because we access custom attributes on it. ==== qutebrowser/browser/webkit/network/networkreply.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webkit/webkitinspector.py - See changes to inspector.py and next commit. ==== qutebrowser/browser/webkit/webkittab.py - Specify the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Add a type ignore for WebKitAction because our workaround needs to treat them as ints (which is allowed by PyQt, even if not type-safe). - Add new ignores for findText calls: The text is a QString and can be None; the flags are valid despite mypy thinking they aren't (stubs regression?). - Specify the type for WebKitHistoryPrivate._history, because we access WebKit-specific attributes. See above (_widget) re this being debatable. - Make mypy aware that .currentFrame() and .frameAt() can return None (stubs regression?). - Make sure the .page() and .page().networkAccessManager() are our subclasses rather than the more generic QtWebKit objects, as we use custom attributes. - Add new type ignores for signals (stubs regression!) ==== qutebrowser/browser/webkit/webpage.py - Make sure the .networkAccessManager() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. - Replace a cast by a type ignore. The cast didn't work anymore. ==== qutebrowser/browser/webkit/webview.py - Make sure the .page() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. ==== qutebrowser/commands/userscripts.py - Remove now unneeded type ignore for signal. ==== qutebrowser/completion/completer.py - Add a new _completion() getter (which ensures it actually gets the completion view) rather than accessing the .parent() directly (which could be any QObject). ==== qutebrowser/completion/completiondelegate.py - Make sure self.parent() is a CompletionView (no helper method as there is only one instance). 
- Remove a now-unneeded type ignore for adding QSizes. ==== qutebrowser/completion/completionwidget.py - Add a ._model() getter which ensures that we get a CompletionModel (with custom attributes) rather than Qt's .model() which can be any QAbstractItemModel (or None). - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/completion/models/completionmodel.py - Remove now unneeded type ignores for signals. - Ignore a complaint about .set_pattern() not being defined. Completion categories don't share any common parent class, so it would be good to introduce a typing.Protocol for this. See #7098. ==== qutebrowser/components/misccommands.py - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/components/readlinecommands.py - Make sure QApplication.instance() is a QApplication (and not just a QCoreApplication). This includes the former "not None" check. ==== qutebrowser/components/scrollcommands.py - Add basic annotation for "funcs" dict. Could have a callable protocol to specify it needs a count kwarg, see #7098. ==== qutebrowser/config/stylesheet.py - Correctly specify that stylesheet apply to QWidgets, not any QObject. - Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy about this with overloads and protocols (stylesheet for set_register being None => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not worth the troble. See #7098. ==== qutebrowser/keyinput/keyutils.py - Remove some now-unneeded type ignores and add a cast for using a single enum value as flags. Might need to look at this again with Qt 6 support. ==== qutebrowser/keyinput/modeman.py - Add a FIXME for using a TypedDict, see comments for hints.py above. ==== qutebrowser/mainwindow/mainwindow.py - Remove now-unneeded type ignores for calling with OR-ed flags. - Improve where we cast from WindowType to WindowFlags, no int needed - Use new .tab_bar() getter, see below. 
==== qutebrowser/mainwindow/prompt.py - Remove now-unneeded type ignores for calling with OR-ed flags. ==== qutebrowser/mainwindow/statusbar/bar.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/statusbar/command.py - Fix type for setText() override (from QLineEdit): text can be None (QString in C++). ==== qutebrowser/mainwindow/statusbar/url.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/tabbedbrowser.py - Specify that TabDeque manages browser tabs, not any QWidgets. It accesses AbstractTab-specific attributes. - Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access .maybe_hide. - Fix the annotations for stored marks: Scroll positions are a QPoint, not int. - Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and .widget(), which ensures that the return values are valid AbstractTabs (or None for _tab_by_idx). This is needed because we access AbstractTab-specific attributes. - For some places, where the tab can be None, continue using .currentTab() but add asserts. - Remove some now-unneeded [unreachable] ignores, as mypy knows about the None possibility now. ==== qutebrowser/mainwindow/tabwidget.py - Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and .widget() are of type TabBar and AbstractTab, respectively. - Add additional assertions where we expect ._tab_by_idx() to never be None. - Remove dead code in get_tab_fields for handling a None y scroll position. I was unable to find any place in the code where this could be set to None. - Remove some now-unneeded type ignores and casts, as mypy now knows that _type_by_idx() could be None. 
- Work around a strange instance where mypy complains about not being able to find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility, despite it clearly being shown as a bool *inside* that class without any annotation. - Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in fact a TabWidget. ==== qutebrowser/misc/crashsignal.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/editor.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/ipc.py - Remove now unneeded type ignores for signals. - Add new type ignores for .error() which is both a signal and a getter (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was renamed to errorOccurred in 5.15. ==== qutebrowser/misc/objects.py - Make sure mypy knows that objects.app is our custom Application (with custom attributes) rather than any QApplication. ==== qutebrowser/utils/objreg.py - Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol, but ideally, the whole objreg stuff should die one day anyways. ==== tests/unit/completion/test_completer.py - Make CompletionWidgetStub inherit from CompletionView so that it passes the new isinstance() asserts in completer.py (see above).
_get_textdoc
a20bb67a878b2e68abf8268c1b0a27f018d01352
qutebrowser
completiondelegate.py
19
33
https://github.com/qutebrowser/qutebrowser.git
7
292
0
68
469
Python
{ "docstring": "Create the QTextDocument of an item.\n\n Args:\n index: The QModelIndex of the item to draw.\n ", "language": "en", "n_whitespaces": 40, "n_words": 15, "vocab_size": 13 }
def _get_textdoc(self, index): assert self._opt is not None # FIXME we probably should do eliding here. See # qcommonstyle.cpp:viewItemDrawText # https://github.com/qutebrowser/qutebrowser/issues/118 text_option = QTextOption() if self._opt.features & QStyleOptionViewItem.WrapText: text_option.setWrapMode(QTextOption.WordWrap) else: text_option.setWrapMode(QTextOption.ManualWrap) text_option.setTextDirection(self._opt.direction) text_option.setAlignment(QStyle.visualAlignment( self._opt.direction, self._opt.displayAlignment)) if self._doc is not None: self._doc.deleteLater() self._doc = QTextDocument(self) self._doc.setDefaultFont(self._opt.font) self._doc.setDefaultTextOption(text_option) self._doc.setDocumentMargin(2) if index.parent().isValid(): view = self.parent() assert isinstance(view, completionwidget.CompletionView), view pattern = view.pattern columns_to_filter = index.model().columns_to_filter(index) if index.column() in columns_to_filter and pattern: if self._opt.state & QStyle.State_Selected: color = config.val.colors.completion.item.selected.match.fg else: color = config.val.colors.completion.match.fg _Highlighter(self._doc, pattern, color) self._doc.setPlainText(self._opt.text) else: self._doc.setHtml( '<span style="font: {};">{}</span>'.format( html.escape(config.val.fonts.completion.category), html.escape(self._opt.text)))
1,585
9,296
159
reconstruction/ostec/external/face_detector/detect_face.py
34
11
def feed(self, *args): assert len(args) != 0 self.terminals = []
initialize ostec
feed
7375ee364e0df2a417f92593e09557f1b2a3575a
insightface
detect_face.py
16
11
https://github.com/deepinsight/insightface.git
4
65
0
32
107
Python
{ "docstring": "Set the input(s) for the next operation by replacing the terminal nodes.\n The arguments can be either layer names or the actual layers.\n ", "language": "en", "n_whitespaces": 37, "n_words": 23, "vocab_size": 20 }
def feed(self, *args):
    """Set the input(s) for the next operation by replacing the terminal nodes.

    The arguments can be either layer names or the actual layers.
    """
    assert len(args) != 0
    self.terminals = []
    for layer in args:
        # Strings are looked up by name; anything else is used as-is.
        if isinstance(layer, str):
            try:
                layer = self.layers[layer]
            except KeyError:
                raise KeyError('Unknown layer name fed: %s' % layer)
        self.terminals.append(layer)
    return self
80,862
271,843
315
keras/engine/training_utils_v1.py
101
16
def unpack_iterator_input(iterator): try: next_element = iterator.get_next() except tf.errors.OutOfRangeError: raise RuntimeError( "Your dataset iterator ran out of data; " "Make sure that your dataset can generate " "required number of samples." ) if isinstance(next_element, (list, tuple)): if len(next_element) not in [2, 3]: raise ValueError( "Please pr
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
unpack_iterator_input
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
training_utils_v1.py
14
26
https://github.com/keras-team/keras.git
5
105
0
67
180
Python
{ "docstring": "Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.\n\n Args:\n iterator: Instance of a dataset iterator.\n\n Returns:\n Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.\n ", "language": "en", "n_whitespaces": 52, "n_words": 33, "vocab_size": 25 }
def unpack_iterator_input(iterator):
    """Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.

    Args:
        iterator: Instance of a dataset iterator.

    Returns:
        Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
    """
    try:
        next_element = iterator.get_next()
    except tf.errors.OutOfRangeError:
        raise RuntimeError(
            "Your dataset iterator ran out of data; "
            "Make sure that your dataset can generate "
            "required number of samples."
        )
    # A bare element (not list/tuple) is treated as the inputs alone.
    if not isinstance(next_element, (list, tuple)):
        return next_element, None, None
    if len(next_element) not in [2, 3]:
        raise ValueError(
            "Please provide model inputs as a list or tuple of 2 or 3 "
            "elements: (input, target) or (input, target, sample_weights) "
            "Received %s" % next_element
        )
    if len(next_element) == 2:
        x, y = next_element
        return x, y, None
    x, y, weights = next_element
    return x, y, weights
26,798
120,211
11
tests/mesh_utils_test.py
9
3
def mock_2x2x4_devices(one_device_per_chip): return mock_devices(2, 2, 4, 'TPU v4', one_device_pe
[mesh_utils] Support creating device meshes for hybrid networks Also makes some NFCs to other mesh_utils code. PiperOrigin-RevId: 442581767
mock_2x2x4_devices
3f9e45e0c5b035de27b14588cd3b4cfd5f3c1f04
jax
mesh_utils_test.py
8
2
https://github.com/google/jax.git
1
19
0
9
31
Python
{ "docstring": "Hard-coded reproduction of jax.devices() output on 2x2x4.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
def mock_2x2x4_devices(one_device_per_chip):
    """Hard-coded reproduction of jax.devices() output on 2x2x4."""
    device_kind = 'TPU v4'
    return mock_devices(2, 2, 4, device_kind, one_device_per_chip)
1,079
6,855
61
ludwig/export.py
31
15
def export_triton(model_path, output_path="model_repository", model_name="ludwig_model", model_version=1, **kwargs): logger.info(f"Model path: {model_path}") logger.info(f"Output path: {output_path}"
Adding new export for Triton (#2078) * Adding new export for triton. Fixes for load model for neuropod export, add output dict format * Adding test for triton. Fix to cast int to string for os.path.join. Added annotation for neurpod * Minor tweaks to config.pbtxt output * Remove logger that is not being used * Restrict torchmetrics<0.9 and whylogs<1.0 until compatibility fixed * Update export_triton to return model path, and added docstrings * Update api to return both model path and config path Co-authored-by: Travis Addair <tgaddair@gmail.com>
export_triton
698a0e0f1ed95d20116dc51aa9c6a7ed48446deb
ludwig
export.py
9
10
https://github.com/ludwig-ai/ludwig.git
1
90
0
27
170
Python
{ "docstring": "Exports a model in torchscript format with config for Triton serving.\n\n # Inputs\n\n :param model_path: (str) filepath to pre-trained model.\n :param output_path: (str, default: `'model_repository'`) directory to store the\n triton models.\n :param model_name: (str, default: `'ludwig_model'`) save triton under this name.\n :param model_name: (int, default: `1`) save neuropod under this verison.\n\n # Return\n\n :returns: (`None`)\n ", "language": "en", "n_whitespaces": 87, "n_words": 55, "vocab_size": 42 }
def export_triton(model_path, output_path="model_repository", model_name="ludwig_model", model_version=1, **kwargs):
    """Exports a model in torchscript format with config for Triton serving.

    :param model_path: (str) filepath to pre-trained model.
    :param output_path: (str, default: `'model_repository'`) directory to
        store the triton models.
    :param model_name: (str, default: `'ludwig_model'`) save triton under
        this name.
    :param model_version: (int, default: `1`) save the triton model under
        this version.

    :returns: (`None`)
    """
    # Emit the parameter summary; messages are identical to the originals.
    for message in (
        f"Model path: {model_path}",
        f"Output path: {output_path}",
        f"Model name: {model_name}",
        f"Model version: {model_version}",
        "\n",
    ):
        logger.info(message)
    model = LudwigModel.load(model_path)
    os.makedirs(output_path, exist_ok=True)
    utils_export_triton(model, output_path, model_name, model_version)
    logger.info(f"Saved to: {output_path}")
17,983
85,389
327
src/sentry/eventstore/models.py
93
24
def tags(self) -> Sequence[Tuple[str, str]]: tags_key_column = self._get_column_name(Columns.TAGS_KEY) tags_value_column = self._get_column_name(Columns.TAGS_VALUE) if tags_key_column in self._snuba_data and tags_value_column in self._snuba_data: keys = self._snuba_data[tags_key_column] values = self._snuba_data[tags_value_column] if keys and values and len(keys) == len(values): return sorted(zip(keys, values)) else: return [] # Nodestore implementation try: rv = sorted( (t, v) for t, v in get_path(self.data, "tags", filter=True) or () if t is not None and v is not None ) return rv except ValueError: # at one point Sentry allowed inva
feat(perf_issues): Add `GroupEvent` and split some functionality in `Event` into a base class. (#38143) Since we can now have events with multiple groups, we can no longer rely on the `Event.group` property. This pr adds in a `GroupEvent` subclass that should be passed around wherever we expect an event to have a single `Group` associated with it. `Event` has been split up into `BaseEvent` and `Event`. We will deprecate and remove uses of `group_id` and `group` in the `Event` class going forward. If we need an event with a `Group`, we can use `build_group_events` to fetch all `GroupEvents` associated with the `Event`, or `for_group` if we just need a specific `Event`/`Group` pairing. Going forward, the plan is to store all groups in the `groups` property. This means that error events being sent via eventstream will have their group included in `groups` as well. We'll need to update the errors processor in snuba to look there instead of `group_id`. This seems cleaner long term, instead of having both `group_id` and `group_ids` passed through. To figure out where we need to use `build_group_events` and `for_group` we can do a mix of searching the codebase and commenting out the `group_id` and `group` properties and see how CI goes.
tags
6aaaf5089b2c39757883179df5a8512db3b0c716
sentry
models.py
15
23
https://github.com/getsentry/sentry.git
11
145
0
67
229
Python
{ "docstring": "\n Tags property uses tags from snuba if loaded otherwise falls back to\n nodestore.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
def tags(self) -> Sequence[Tuple[str, str]]: tags_key_column = self._get_column_name(Columns.TAGS_KEY) tags_value_column = self._get_column_name(Columns.TAGS_VALUE) if tags_key_column in self._snuba_data and tags_value_column in self._snuba_data: keys = self._snuba_data[tags_key_column] values = self._snuba_data[tags_value_column] if keys and values and len(keys) == len(values): return sorted(zip(keys, values)) else: return [] # Nodestore implementation try: rv = sorted( (t, v) for t, v in get_path(self.data, "tags", filter=True) or () if t is not None and v is not None ) return rv except ValueError: # at one point Sentry allowed invalid tag sets such as (foo, bar) # vs ((tag, foo), (tag, bar)) return []
77,934
264,988
116
netbox/dcim/tests/test_models.py
38
12
def test_cable_validates_compatible_types(self): # An interface cannot be connected to a power port cable = Cable(a_terminations=[self.interface1, self.interface2], b_terminations=[self.interface3]) with self.assertRaises(ValidationError): cable.clean() # TODO: Remove this? # def
Clean up tests
test_cable_validates_compatible_types
6280398bc17211bbc5b321039144c1eb0461f4a9
netbox
test_models.py
11
4
https://github.com/netbox-community/netbox.git
1
43
0
26
81
Python
{ "docstring": "\n The clean method should have a check to ensure only compatible port types can be connected by a cable\n \n # A cable cannot connect a front port to its corresponding rear port\n # ", "language": "en", "n_whitespaces": 63, "n_words": 33, "vocab_size": 26 }
def test_cable_validates_compatible_types(self): # An interface cannot be connected to a power port cable = Cable(a_terminations=[self.interface1, self.interface2], b_terminations=[self.interface3]) with self.assertRaises(ValidationError): cable.clean() # TODO: Remove this? # def test_cable_front_port_cannot_connect_to_corresponding_rear_port(self): # # cable = Cable(a_terminations=[self.front_port1], b_terminations=[self.rear_port1]) # with self.assertRaises(ValidationError): # cable.clean()
81,490
275,865
766
keras/saving/hdf5_format.py
235
55
def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True): if h5py is None: raise ImportError( "`save_model()` using h5 format requires h5py. Could not " "import h5py." ) # TODO(psv) Add warning when we save models that contain non-serializable # entities like metrics added using `add_metric` and losses added using # `add_loss.` if len(model.weights) != len(model._undeduplicated_weights): logging.warning( "Found duplicated `Variable`s in Model's `weights`. " "This is usually caused by `Variable`s being shared by " "Layers in the Model. These `Variable`s will be treated " "as separate `Variable`s when the Model is restored. To " 'avoid this, please save with `save_format="tf"`.' ) if not isinstance(filepath, h5py.File): # If file exists and should not be overwritten. if not overwrite and os.path.isfile(filepath): proceed = ask_to_proceed_with_overwrite(filepath) if not proceed: return # Try creating dir if not exist dirpath = os.path.dirname(filepath) if not os.path.exists(dirpath): tf.io.gfile.makedirs(dirpath) f = h5py.File(filepath, mode="w") opened_new_file = True else: f = filepath opened_new_file = False try: model_metadata = saving_utils.model_metadata(model, include_optimiz
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
save_model_to_hdf5
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
hdf5_format.py
18
54
https://github.com/keras-team/keras.git
16
290
0
164
490
Python
{ "docstring": "Saves a model to a HDF5 file.\n\n The saved model contains:\n - the model's configuration (topology)\n - the model's weights\n - the model's optimizer's state (if any)\n\n Thus the saved model can be reinstantiated in\n the exact same state, without any of the code\n used for model definition or training.\n\n Args:\n model: Keras model instance to be saved.\n filepath: One of the following:\n - String, path where to save the model\n - `h5py.File` object where to save the model\n overwrite: Whether we should overwrite any existing\n model at the target location, or instead\n ask the user with a manual prompt.\n include_optimizer: If True, save optimizer's state together.\n\n Raises:\n ImportError: if h5py is not available.\n ", "language": "en", "n_whitespaces": 235, "n_words": 114, "vocab_size": 76 }
def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True): if h5py is None: raise ImportError( "`save_model()` using h5 format requires h5py. Could not " "import h5py." ) # TODO(psv) Add warning when we save models that contain non-serializable # entities like metrics added using `add_metric` and losses added using # `add_loss.` if len(model.weights) != len(model._undeduplicated_weights): logging.warning( "Found duplicated `Variable`s in Model's `weights`. " "This is usually caused by `Variable`s being shared by " "Layers in the Model. These `Variable`s will be treated " "as separate `Variable`s when the Model is restored. To " 'avoid this, please save with `save_format="tf"`.' ) if not isinstance(filepath, h5py.File): # If file exists and should not be overwritten. if not overwrite and os.path.isfile(filepath): proceed = ask_to_proceed_with_overwrite(filepath) if not proceed: return # Try creating dir if not exist dirpath = os.path.dirname(filepath) if not os.path.exists(dirpath): tf.io.gfile.makedirs(dirpath) f = h5py.File(filepath, mode="w") opened_new_file = True else: f = filepath opened_new_file = False try: model_metadata = saving_utils.model_metadata(model, include_optimizer) for k, v in model_metadata.items(): if isinstance(v, (dict, list, tuple)): f.attrs[k] = json.dumps( v, default=json_utils.get_json_type ).encode("utf8") else: f.attrs[k] = v model_weights_group = f.create_group("model_weights") save_weights_to_hdf5_group(model_weights_group, model) # TODO(b/128683857): Add integration tests between tf.keras and external # Keras, to avoid breaking TF.js users. if isinstance(model.optimizer, optimizer_experimental.Optimizer): logging.warning( "HDF5 format does not save weights of" " `optimizer_experimental.Optimizer`, your optimizer will" " be recompiled at loading time." 
) elif ( include_optimizer and model.optimizer and not isinstance(model.optimizer, optimizer_v1.TFOptimizer) ): save_optimizer_weights_to_hdf5_group(f, model.optimizer) f.flush() finally: if opened_new_file: f.close()
270
2,278
299
packages/syft/src/syft/core/node/common/node_manager/user_manager.py
79
24
def set(self, **kwargs) -> None: # nosec attributes = {} user_id = kwargs["u
replaced all methods of usermanager class, working login Co-Authored By: Ionesio
set
066545e8a88e842aa7d0a5d57bac88716001bced
PySyft
user_manager.py
11
41
https://github.com/OpenMined/PySyft.git
10
205
0
55
351
Python
{ "docstring": "Updates the information for the given user id.\n\n Args:\n user_id (str): unique id of the user in the database.\n email (str, optional): email of the user. Defaults to \"\".\n password (str, optional): password of the user. Defaults to \"\".\n role (int, optional): role of the user. Defaults to 0.\n name (str, optional): name of the user. Defaults to \"\".\n website (str, optional): website of the institution of the user. Defaults to \"\".\n institution (str, optional): name of the institution of the user. Defaults to \"\".\n budget (float, optional): privacy budget allocated to the user. Defaults to 0.0.\n\n Raises:\n UserNotFoundError: Raised when a user does not exits for the given user id.\n Exception: Raised when an invalid argument/property is passed.\n ", "language": "en", "n_whitespaces": 250, "n_words": 119, "vocab_size": 48 }
def set(self, **kwargs) -> None: # nosec attributes = {} user_id = kwargs["user_id"] user = self.first(id_int=int(user_id)) if not user: raise UserNotFoundError for k, v in kwargs.items(): if k in user.__attr_searchable__: attributes[k] = v if kwargs.get("email", None): user.email = kwargs["email"] elif kwargs.get("role", None): user.role = kwargs["role"] elif kwargs.get("name", None): user.name = kwargs["name"] elif kwargs.get("budget", None): user.budget = kwargs["budget"] elif kwargs.get("website", None): user.website = kwargs["website"] elif kwargs.get("institution", None): user.institution = kwargs["institution"] else: raise Exception attributes["__blob__"] = user.to_bytes() self.update_one({"id_int": int(user_id)}, {"$set": attributes})
18,963
92,964
336
tests/sentry/snuba/metrics/fields/test_base.py
62
31
def test_get_entity_and_validate_dependency_tree_of_a_single_entity_derived_metric(self): use_case_id = UseCaseKey.RELEASE_HEALTH expected_derived_metrics_entities = { SessionMRI.ALL.value: "metrics_counters", SessionMRI.ALL_USER.value: "metrics_sets", SessionMRI.CRASHED.value: "metrics_counters", SessionMRI.CRASHED_USER.value: "metrics_sets", SessionMRI.ABNORMAL.value: "metrics_counters", SessionMRI.ABNORMAL_USER.value: "metrics_sets", SessionMRI.CRASH_FREE_RATE.value: "metrics_counters", SessionMRI.CRASH_FREE_USER_RATE.value: "metrics_sets", SessionMRI.ERRORED_PREAGGREGATED.value: "metrics_counters", SessionMRI.ERRORED_SET.value: "metrics_sets", SessionMRI.ERRORED_USER_ALL.value: "metrics_sets", SessionMRI.CRASHED_AND_ABNORMAL_USER.value: "metrics_sets", SessionMRI.ERRORED_USER.value: "metrics_sets", } for key, value in expected_derived_metrics_entities.items(): assert ( MOCKED_DERIVED_METRICS[key].get_entity( projects=[self.project], use_case_id=use_case_id ) ) == value # Incorrectly setup SingularEntityDerivedMetric with metrics spanning multiple entities with pytest.raises(DerivedMetricParseException): self.crash_free_fake.get_entity(projects=[self.proje
fix(snuba): Add appropriate `UseCaseKey` for indexer [TET-146] (#36308) * fix(snuba): Add appropriate `UseCaseKey` for indexer Update indexer invocation call to have the appropriate `UseCaseKey` depending on use case. In `src/sentry/sentry_metrics/indexer/base.py::StringIndexer` when using `resolve` and `reverse_resolve` callers should not rely on the default use_case_id. Important changes: - Add required parameter `use_case_id: UseCaseKey` to `get_series` from `src/sentry/snuba/metrics/datasource.py#L612`; - Add required parameter to `get_metrics` in `src/sentry/snuba/metrics/datasource.py` - Add required parameter to `get_tags` in `src/sentry/snuba/metrics/datasource.py` - Add required parameter to `get_tag_values` in `src/sentry/snuba/metrics/datasource.py`
test_get_entity_and_validate_dependency_tree_of_a_single_entity_derived_metric
cd803d173c72b64d06c0687170bf9a945d0b503c
sentry
test_base.py
14
25
https://github.com/getsentry/sentry.git
2
180
0
47
292
Python
{ "docstring": "\n Tests that ensures that get_entity method works expected in the sense that:\n - Since it is the first function that is called by the query_builder, validation is\n applied there to ensure that if it is an instance of a SingleEntityDerivedMetric,\n then it is composed of only other SingleEntityDerivedMetric or\n RawMetric that belong to the same entity\n - Return the entity of that derived metric\n ", "language": "en", "n_whitespaces": 114, "n_words": 64, "vocab_size": 44 }
def test_get_entity_and_validate_dependency_tree_of_a_single_entity_derived_metric(self): use_case_id = UseCaseKey.RELEASE_HEALTH expected_derived_metrics_entities = { SessionMRI.ALL.value: "metrics_counters", SessionMRI.ALL_USER.value: "metrics_sets", SessionMRI.CRASHED.value: "metrics_counters", SessionMRI.CRASHED_USER.value: "metrics_sets", SessionMRI.ABNORMAL.value: "metrics_counters", SessionMRI.ABNORMAL_USER.value: "metrics_sets", SessionMRI.CRASH_FREE_RATE.value: "metrics_counters", SessionMRI.CRASH_FREE_USER_RATE.value: "metrics_sets", SessionMRI.ERRORED_PREAGGREGATED.value: "metrics_counters", SessionMRI.ERRORED_SET.value: "metrics_sets", SessionMRI.ERRORED_USER_ALL.value: "metrics_sets", SessionMRI.CRASHED_AND_ABNORMAL_USER.value: "metrics_sets", SessionMRI.ERRORED_USER.value: "metrics_sets", } for key, value in expected_derived_metrics_entities.items(): assert ( MOCKED_DERIVED_METRICS[key].get_entity( projects=[self.project], use_case_id=use_case_id ) ) == value # Incorrectly setup SingularEntityDerivedMetric with metrics spanning multiple entities with pytest.raises(DerivedMetricParseException): self.crash_free_fake.get_entity(projects=[self.project], use_case_id=use_case_id)
76,202
260,356
83
sklearn/decomposition/_sparse_pca.py
23
13
def transform(self, X): check_is_fitted(self) X = self._validate_data(X, reset=False) X = X - self.mean_ U = ridge_regression( self.components_.T, X.T, self.ridge_alpha, solver="cholesky" ) return U
MAINT Use _validate_params in SparsePCA and MiniBatchSparsePCA (#23710) Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com> Co-authored-by: jeremiedbb <jeremiedbb@yahoo.fr>
transform
db6123fe40400828918037f3fae949bfcc4d9d05
scikit-learn
_sparse_pca.py
10
8
https://github.com/scikit-learn/scikit-learn.git
1
55
0
18
87
Python
{ "docstring": "Least Squares projection of the data onto the sparse components.\n\n To avoid instability issues in case the system is under-determined,\n regularization can be applied (Ridge regression) via the\n `ridge_alpha` parameter.\n\n Note that Sparse PCA components orthogonality is not enforced as in PCA\n hence one cannot use a simple linear projection.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test data to be transformed, must have the same number of\n features as the data used to train the model.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Transformed data.\n ", "language": "en", "n_whitespaces": 207, "n_words": 90, "vocab_size": 69 }
def transform(self, X): check_is_fitted(self) X = self._validate_data(X, reset=False) X = X - self.mean_ U = ridge_regression( self.components_.T, X.T, self.ridge_alpha, solver="cholesky" ) return U
77,797
264,756
233
netbox/utilities/utils.py
117
27
def serialize_object(obj, extra=None): json_str = serialize('json', [obj]) print(json_str) data = json.loads(json_str)[0]['fields'] # Exclude any MPTTModel fields if issubclass(obj.__class__, MPTTModel): for field in ['level', 'lft', 'rght', 'tree_id']: data.pop
Extend Cable model to support multiple A/B terminations
serialize_object
4bb9b6ee2639db683b70d6ddbee055497e0a3647
netbox
utils.py
12
18
https://github.com/netbox-community/netbox.git
11
167
0
86
285
Python
{ "docstring": "\n Return a generic JSON representation of an object using Django's built-in serializer. (This is used for things like\n change logging, not the REST API.) Optionally include a dictionary to supplement the object data. A list of keys\n can be provided to exclude them from the returned dictionary. Private fields (prefaced with an underscore) are\n implicitly excluded.\n ", "language": "en", "n_whitespaces": 72, "n_words": 56, "vocab_size": 49 }
def serialize_object(obj, extra=None): json_str = serialize('json', [obj]) print(json_str) data = json.loads(json_str)[0]['fields'] # Exclude any MPTTModel fields if issubclass(obj.__class__, MPTTModel): for field in ['level', 'lft', 'rght', 'tree_id']: data.pop(field) # Include custom_field_data as "custom_fields" if hasattr(obj, 'custom_field_data'): data['custom_fields'] = data.pop('custom_field_data') # Include any tags. Check for tags cached on the instance; fall back to using the manager. if is_taggable(obj): tags = getattr(obj, '_tags', None) or obj.tags.all() data['tags'] = [tag.name for tag in tags] # Append any extra data if extra is not None: data.update(extra) # Copy keys to list to avoid 'dictionary changed size during iteration' exception for key in list(data): # Private fields shouldn't be logged in the object change if isinstance(key, str) and key.startswith('_'): data.pop(key) return data
88,520
289,378
405
tests/components/history/test_init.py
112
20
async def test_statistics_during_period(recorder_mock, hass, hass_ws_client, caplog): now = dt_util.utcnow() await async_setup_component(hass, "history", {}) client = await hass_ws_client() # Test the WS API works and issues a warning await client.send_json( { "id": 1, "type": "history/statistics_during_period", "start_time": now.isoformat(), "end_time": now.isoformat(), "statistic_ids": ["sensor.test"], "period": "hour", } ) response = await client.receive_json() assert response["success"] assert response["result"] == {} assert ( "WS API 'history/statistics_during_period' is deprecated and will be removed in " "Home Assistant Core 2022.12. Use 'recorder/statistics_during_period' instead" ) in caplog.text # Test the WS API forwards to recorder with patch( "homeassistant.components.history.recorder_ws.ws_handle_get_statistics_during_period", wraps=ws_handle_get_statist
Ensure recorder test fixture is setup before hass fixture (#80528) * Ensure recorder test fixture is setup before hass fixture * Adjust more tests
test_statistics_during_period
31a787558fd312331b55e5c2c4b33341fc3601fc
core
test_init.py
14
37
https://github.com/home-assistant/core.git
1
173
0
76
319
Python
{ "docstring": "Test history/statistics_during_period forwards to recorder.", "language": "en", "n_whitespaces": 4, "n_words": 5, "vocab_size": 5 }
async def test_statistics_during_period(recorder_mock, hass, hass_ws_client, caplog): now = dt_util.utcnow() await async_setup_component(hass, "history", {}) client = await hass_ws_client() # Test the WS API works and issues a warning await client.send_json( { "id": 1, "type": "history/statistics_during_period", "start_time": now.isoformat(), "end_time": now.isoformat(), "statistic_ids": ["sensor.test"], "period": "hour", } ) response = await client.receive_json() assert response["success"] assert response["result"] == {} assert ( "WS API 'history/statistics_during_period' is deprecated and will be removed in " "Home Assistant Core 2022.12. Use 'recorder/statistics_during_period' instead" ) in caplog.text # Test the WS API forwards to recorder with patch( "homeassistant.components.history.recorder_ws.ws_handle_get_statistics_during_period", wraps=ws_handle_get_statistics_during_period, ) as ws_mock: await client.send_json( { "id": 2, "type": "history/statistics_during_period", "start_time": now.isoformat(), "end_time": now.isoformat(), "statistic_ids": ["sensor.test"], "period": "hour", } ) await client.receive_json() ws_mock.assert_awaited_once()
16,427
75,606
104
wagtail/search/management/commands/update_index.py
24
8
def queryset_chunks(self, qs, chunk_size=DEFAULT_CHUNK_SIZE): i = 0
Reformat with black
queryset_chunks
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
update_index.py
14
8
https://github.com/wagtail/wagtail.git
3
44
0
21
73
Python
{ "docstring": "\n Yield a queryset in chunks of at most ``chunk_size``. The chunk yielded\n will be a list, not a queryset. Iterating over the chunks is done in a\n transaction so that the order and count of items in the queryset\n remains stable.\n ", "language": "en", "n_whitespaces": 77, "n_words": 41, "vocab_size": 31 }
def queryset_chunks(self, qs, chunk_size=DEFAULT_CHUNK_SIZE): i = 0 while True: items = list(qs[i * chunk_size :][:chunk_size]) if not items: break yield items i += 1
19,211
95,431
229
src/sentry/search/events/builder.py
45
15
def flattened_having(self) -> List[Condition]: flattened: List[Condition] = [] boolean_conditions: List[BooleanCondition] = [] for condition in self.having: if isinstance(condition, Condition): flattened.append(condition) elif isinstance(condition, BooleanCondition): boolean_conditions.append(condition) while len(boolean_conditions) > 0: boolean_condition = boolean_conditions.pop() for condition in boolean_condition.conditions: if isinstance(condition, Condition): flattened.append(condition)
fix(snql): Add aggregations to select in auto_aggregation (#31061) - This is to fix an issue for queries that have the uniq aggregation in the HAVING clause, and is not selected. - Previously we would not add the aggregation to the select clause in these cases - Now anything in the having clause will get added to the select clause as well if auto_aggregation is enabled - if its disabled we raise an invalid search query error - This also fixes a bug where this having validation wasn't working correctly for boolean conditions
flattened_having
2a4da479b2d4a2faa901701f4c73ff823236e9e8
sentry
builder.py
14
20
https://github.com/getsentry/sentry.git
8
116
0
30
184
Python
{ "docstring": "Return self.having as a flattened list ignoring boolean operators\n This is because self.having can have a mix of BooleanConditions and Conditions. And each BooleanCondition can in\n turn be a mix of either type.\n ", "language": "en", "n_whitespaces": 54, "n_words": 33, "vocab_size": 27 }
def flattened_having(self) -> List[Condition]: flattened: List[Condition] = [] boolean_conditions: List[BooleanCondition] = [] for condition in self.having: if isinstance(condition, Condition): flattened.append(condition) elif isinstance(condition, BooleanCondition): boolean_conditions.append(condition) while len(boolean_conditions) > 0: boolean_condition = boolean_conditions.pop() for condition in boolean_condition.conditions: if isinstance(condition, Condition): flattened.append(condition) elif isinstance(condition, BooleanCondition): boolean_conditions.append(condition) return flattened
39,468
163,634
211
pandas/core/arrays/datetimelike.py
64
27
def _add_timedelta_arraylike(self, other): # overridden by PeriodArray if len(self) != len(other): raise ValueError("cannot add indices of unequal length") if isinstance(other, np.ndarray):
EA interface: rename ExtensionArray._hasnans to ._hasna (#45519)
_add_timedelta_arraylike
a0b40c0f2ad73420a54e48ec4f564b9667e3f452
pandas
datetimelike.py
10
15
https://github.com/pandas-dev/pandas.git
5
122
0
57
191
Python
{ "docstring": "\n Add a delta of a TimedeltaIndex\n\n Returns\n -------\n Same type as self\n ", "language": "en", "n_whitespaces": 48, "n_words": 12, "vocab_size": 11 }
def _add_timedelta_arraylike(self, other): # overridden by PeriodArray if len(self) != len(other): raise ValueError("cannot add indices of unequal length") if isinstance(other, np.ndarray): # ndarray[timedelta64]; wrap in TimedeltaIndex for op from pandas.core.arrays import TimedeltaArray other = TimedeltaArray._from_sequence(other) self_i8 = self.asi8 other_i8 = other.asi8 new_values = checked_add_with_arr( self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan ) if self._hasna or other._hasna: mask = self._isnan | other._isnan np.putmask(new_values, mask, iNaT) return type(self)(new_values, dtype=self.dtype)
80,737
271,248
1,164
keras/engine/functional.py
488
55
def _map_graph_network(inputs, outputs): # "depth" is number of layers between output Node and the Node. # Nodes are ordered from inputs -> outputs. nodes_in_decreasing_depth, layer_indices = _build_map(outputs) network_nodes = { _make_node_key(node.layer.name, node.layer._inbound_nodes.index(node)) for node in nodes_in_decreasing_depth } nodes_depths = {} # dict {node: depth value} layers_depths = {} # dict {layer: depth value} for node in reversed(nodes_in_decreasing_depth): # If the depth is not set, the node has no outbound nodes (depth 0). depth = nodes_depths.setdefault(node, 0) # Update the depth of the corresponding layer previous_depth = layers_depths.get(node.layer, 0) # If we've seen this layer before at a higher depth, # we should use that depth instead of the node depth. # This is necessary for shared layers that have inputs at different # depth levels in the graph. depth = max(depth, previous_depth) layers_depths[node.layer] = depth nodes_depths[node] = depth # Update the depth of inbound nodes. # The "depth" of a node is the max of the depths # of all nodes it is connected to + 1. for node_dep in node.parent_nodes: previous_depth = nodes_depths.get(node_dep, 0) nodes_depths[node_dep] = max(depth + 1, previous_depth) # Handle inputs that are not connected to outputs. # We do not error out here because the inputs may be used to compute losses # and metrics. 
for input_t in inputs: input_layer = input_t._keras_history[0] if input_layer not in layers_depths: layers_depths[input_layer] = 0 layer_indices[input_layer] = -1 nodes_depths[input_layer._inbound_nodes[0]] = 0 network_nodes.add(_make_node_key(input_layer.name, 0)) # Build a dict {depth: list of nodes with this depth} nodes_by_depth = collections.defaultdict(list) for node, depth in nodes_depths.items(): nodes_by_depth[depth].append(node) # Build a dict {depth: list of layers with this depth} layers_by_depth = collections.defaultdict(list) for layer, depth in layers_depths.items(): layers_by_depth[depth].append(layer) # Get sorted list of layer depths. depth_keys = list(layers_by_depth.keys()) depth_keys.sort(reverse=True) # Set self.layers ordered by depth. layers = [] for depth in depth_keys: layers_for_depth = layers_by_depth[depth] # Network.layers needs to have a deterministic order: # here we order them by traversal order. layers_for_depth.sort(key=lambda x: layer_indices[x]) layers.extend(layers_for_depth) # Get sorted list of node depths. depth_keys = list(nodes_by_depth.keys()) depth_keys.sort(reverse=True) # Check that all tensors required are computable. # computable_tensors: all tensors in the graph # that can be computed from the inputs provided. computable_tensors = set() for x in inputs: computable_tensors.add(id(x)) layers_with_complete_input = [] # To provide a better error msg. for depth in depth_keys: for node in nodes_by_depth[depth]: layer = node.layer if layer and not node.is_input: for x in tf.nest.flatten(node.keras_inputs): if id(x) not in computable_tensors:
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
_map_graph_network
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
functional.py
21
65
https://github.com/keras-team/keras.git
20
470
0
245
792
Python
{ "docstring": "Validates a network's topology and gather its layers and nodes.\n\n Args:\n inputs: List of input tensors.\n outputs: List of outputs tensors.\n\n Returns:\n A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`.\n - nodes: list of Node instances.\n - nodes_by_depth: dict mapping ints (depth) to lists of node instances.\n - layers: list of Layer instances.\n - layers_by_depth: dict mapping ints (depth) to lists of layer instances.\n\n Raises:\n ValueError: In case the network is not valid (e.g. disconnected graph).\n ", "language": "en", "n_whitespaces": 126, "n_words": 74, "vocab_size": 53 }
def _map_graph_network(inputs, outputs): # "depth" is number of layers between output Node and the Node. # Nodes are ordered from inputs -> outputs. nodes_in_decreasing_depth, layer_indices = _build_map(outputs) network_nodes = { _make_node_key(node.layer.name, node.layer._inbound_nodes.index(node)) for node in nodes_in_decreasing_depth } nodes_depths = {} # dict {node: depth value} layers_depths = {} # dict {layer: depth value} for node in reversed(nodes_in_decreasing_depth): # If the depth is not set, the node has no outbound nodes (depth 0). depth = nodes_depths.setdefault(node, 0) # Update the depth of the corresponding layer previous_depth = layers_depths.get(node.layer, 0) # If we've seen this layer before at a higher depth, # we should use that depth instead of the node depth. # This is necessary for shared layers that have inputs at different # depth levels in the graph. depth = max(depth, previous_depth) layers_depths[node.layer] = depth nodes_depths[node] = depth # Update the depth of inbound nodes. # The "depth" of a node is the max of the depths # of all nodes it is connected to + 1. for node_dep in node.parent_nodes: previous_depth = nodes_depths.get(node_dep, 0) nodes_depths[node_dep] = max(depth + 1, previous_depth) # Handle inputs that are not connected to outputs. # We do not error out here because the inputs may be used to compute losses # and metrics. 
for input_t in inputs: input_layer = input_t._keras_history[0] if input_layer not in layers_depths: layers_depths[input_layer] = 0 layer_indices[input_layer] = -1 nodes_depths[input_layer._inbound_nodes[0]] = 0 network_nodes.add(_make_node_key(input_layer.name, 0)) # Build a dict {depth: list of nodes with this depth} nodes_by_depth = collections.defaultdict(list) for node, depth in nodes_depths.items(): nodes_by_depth[depth].append(node) # Build a dict {depth: list of layers with this depth} layers_by_depth = collections.defaultdict(list) for layer, depth in layers_depths.items(): layers_by_depth[depth].append(layer) # Get sorted list of layer depths. depth_keys = list(layers_by_depth.keys()) depth_keys.sort(reverse=True) # Set self.layers ordered by depth. layers = [] for depth in depth_keys: layers_for_depth = layers_by_depth[depth] # Network.layers needs to have a deterministic order: # here we order them by traversal order. layers_for_depth.sort(key=lambda x: layer_indices[x]) layers.extend(layers_for_depth) # Get sorted list of node depths. depth_keys = list(nodes_by_depth.keys()) depth_keys.sort(reverse=True) # Check that all tensors required are computable. # computable_tensors: all tensors in the graph # that can be computed from the inputs provided. computable_tensors = set() for x in inputs: computable_tensors.add(id(x)) layers_with_complete_input = [] # To provide a better error msg. for depth in depth_keys: for node in nodes_by_depth[depth]: layer = node.layer if layer and not node.is_input: for x in tf.nest.flatten(node.keras_inputs): if id(x) not in computable_tensors: raise ValueError( f"Graph disconnected: cannot obtain value for tensor {x} " f'at layer "{layer.name}". 
The following previous layers ' f"were accessed without issue: {layers_with_complete_input}" ) for x in tf.nest.flatten(node.outputs): computable_tensors.add(id(x)) layers_with_complete_input.append(layer.name) # Ensure name unicity, which will be crucial for serialization # (since serialized nodes refer to layers by their name). all_names = [layer.name for layer in layers] for name in all_names: if all_names.count(name) != 1: raise ValueError( f'The name "{name}" is used {all_names.count(name)} ' "times in the model. All layer names should be unique." ) return network_nodes, nodes_by_depth, layers, layers_by_depth
28,513
127,723
138
python/ray/data/dataset.py
40
18
def default_batch_format(self) -> Type: # noqa: E501 import pandas as pd import pyarrow as pa schema = self.schema() assert isinstance(schema,
[Datasets] Add `Dataset.default_batch_format` (#28434) Participants in the PyTorch UX study couldn't understand how the "native" batch format works. This PR introduces a method Dataset.native_batch_format that tells users exactly what the native batch format is, so users don't have to guess.
default_batch_format
206e847694cba414dc4664e4ae02b20e10e3f25d
ray
dataset.py
10
72
https://github.com/ray-project/ray.git
4
79
0
32
124
Python
{ "docstring": "Return this dataset's default batch format.\n\n The default batch format describes what batches of data look like. To learn more\n about batch formats, read\n :ref:`writing user-defined functions <transform_datasets_writing_udfs>`.\n\n Example:\n\n If your dataset represents a list of Python objects, then the default batch\n format is ``list``.\n\n >>> ds = ray.data.range(100)\n >>> ds # doctest: +SKIP\n Dataset(num_blocks=20, num_rows=100, schema=<class 'int'>)\n >>> ds.default_batch_format()\n <class 'list'>\n >>> next(ds.iter_batches(batch_size=4))\n [0, 1, 2, 3]\n\n If your dataset contains a single ``TensorDtype`` or ``ArrowTensorType``\n column named ``__value__`` (as created by :func:`ray.data.from_numpy`), then\n the default batch format is ``np.ndarray``. For more information on tensor\n datasets, read the :ref:`tensor support guide <datasets_tensor_support>`.\n\n >>> ds = ray.data.range_tensor(100)\n >>> ds # doctest: +SKIP\n Dataset(num_blocks=20, num_rows=100, schema={__value__: ArrowTensorType(shape=(1,), dtype=int64)})\n >>> ds.default_batch_format()\n <class 'numpy.ndarray'>\n >>> next(ds.iter_batches(batch_size=4))\n array([[0],\n [1],\n [2],\n [3]])\n\n If your dataset represents tabular data and doesn't only consist of a\n ``__value__`` tensor column (such as is created by\n :meth:`ray.data.from_numpy`), then the default batch format is\n ``pd.DataFrame``.\n\n >>> import pandas as pd\n >>> df = pd.DataFrame({\"foo\": [\"a\", \"b\"], \"bar\": [0, 1]})\n >>> ds = ray.data.from_pandas(df)\n >>> ds # doctest: +SKIP\n Dataset(num_blocks=1, num_rows=2, schema={foo: object, bar: int64})\n >>> ds.default_batch_format()\n <class 'pandas.core.frame.DataFrame'>\n >>> next(ds.iter_batches(batch_size=4))\n foo bar\n 0 a 0\n 1 b 1\n\n .. 
seealso::\n\n :meth:`~Dataset.map_batches`\n Call this function to transform batches of data.\n\n :meth:`~Dataset.iter_batches`\n Call this function to iterate over batches of data.\n\n ", "language": "en", "n_whitespaces": 768, "n_words": 219, "vocab_size": 130 }
def default_batch_format(self) -> Type: # noqa: E501 import pandas as pd import pyarrow as pa schema = self.schema() assert isinstance(schema, (type, PandasBlockSchema, pa.Schema)) if isinstance(schema, type): return list if isinstance(schema, (PandasBlockSchema, pa.Schema)): if schema.names == [VALUE_COL_NAME]: return np.ndarray return pd.DataFrame
@pytest.mark.parametrize("Tree", REG_TREES.values()) @pytest.mark.parametrize( "old_criterion, new_criterion", [ ("mse", "squared_error"), ("mae", "absolute_error"), ], )
75,740
259,378
282
sklearn/tree/tests/test_tree.py
159
40
def test_decision_tree_regressor_sample_weight_consistency(criterion): tree_params = dict(criterion=criterion) tree = DecisionTreeRegressor(**tree_params, random_state=42) for kind in ["zeros", "ones"]: check_sample_weights_invariance( "DecisionTreeRegressor_" + criterion, tree, kind="zeros" ) rng = np.random.RandomState(0) n_samples, n_features = 10, 5 X = rng.rand(n_samples, n_features) y = np.mean(X, axis=1) + rng.rand(n_samples) # make it positive in order to work also for poisson criterion y += np.min(y) + 0.1 # check that multiplying sample_weight by 2 is equivalent # to repeating corresponding samples twice X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) y2 = np.concatenate([y, y[: n_samples // 2]]) sample_weight_1 = np.ones(len(y)) sample_weight_1[: n_samples // 2] = 2 tree1 = DecisionTreeRegressor(**tree_params).fit( X, y, sample_weight=sample_weight_1 ) tree2 = DecisionTreeRegressor(**tree_params).fit(X2, y2, sample_weight=None) assert tree1.tree_.node_count == tree2.tree_.node_count # Thresholds, tree.tree_.threshold, and values, tree.tree_.value, are not # exactly the same, but on the training set, those differences do not # matter and thus predictions are the same. assert_allclose(tree1.predict(X), tree2.predict(X)) # TODO: Remove in v1.2 @pytest.mark.parametrize("Tree", REG_TREES.values()) @pytest.mark.parametrize( "old_criterion, new_criterion", [ ("mse", "squared_error"),
MNT fix typo in tree test name (#22943)
test_decision_tree_regressor_sample_weight_consistency
f89a40bd92004368dee38ea76a1b9eaddaff4d7a
scikit-learn
test_tree.py
12
22
https://github.com/scikit-learn/scikit-learn.git
2
212
1
123
431
Python
{ "docstring": "Test that the impact of sample_weight is consistent.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_decision_tree_regressor_sample_weight_consistency(criterion): tree_params = dict(criterion=criterion) tree = DecisionTreeRegressor(**tree_params, random_state=42) for kind in ["zeros", "ones"]: check_sample_weights_invariance( "DecisionTreeRegressor_" + criterion, tree, kind="zeros" ) rng = np.random.RandomState(0) n_samples, n_features = 10, 5 X = rng.rand(n_samples, n_features) y = np.mean(X, axis=1) + rng.rand(n_samples) # make it positive in order to work also for poisson criterion y += np.min(y) + 0.1 # check that multiplying sample_weight by 2 is equivalent # to repeating corresponding samples twice X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) y2 = np.concatenate([y, y[: n_samples // 2]]) sample_weight_1 = np.ones(len(y)) sample_weight_1[: n_samples // 2] = 2 tree1 = DecisionTreeRegressor(**tree_params).fit( X, y, sample_weight=sample_weight_1 ) tree2 = DecisionTreeRegressor(**tree_params).fit(X2, y2, sample_weight=None) assert tree1.tree_.node_count == tree2.tree_.node_count # Thresholds, tree.tree_.threshold, and values, tree.tree_.value, are not # exactly the same, but on the training set, those differences do not # matter and thus predictions are the same. assert_allclose(tree1.predict(X), tree2.predict(X)) # TODO: Remove in v1.2 @pytest.mark.parametrize("Tree", REG_TREES.values()) @pytest.mark.parametrize( "old_criterion, new_criterion", [ ("mse", "squared_error"), ("mae", "absolute_error"), ], )
52,020
207,608
110
tests/admin_views/tests.py
24
9
def test_with_fk_to_field(self): response = self.client.get( reverse("admin:auth_user_changelist") + "?q=joe&%s=id" % TO_FIELD_V
Refs #33476 -- Reformatted code with Black.
test_with_fk_to_field
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
12
10
https://github.com/django/django.git
1
46
0
22
83
Python
{ "docstring": "\n The to_field GET parameter is preserved when a search is performed.\n Refs #10918.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 12 }
def test_with_fk_to_field(self): response = self.client.get( reverse("admin:auth_user_changelist") + "?q=joe&%s=id" % TO_FIELD_VAR ) self.assertContains(response, "\n1 user\n") self.assertContains( response, '<input type="hidden" name="%s" value="id">' % TO_FIELD_VAR, html=True, )
46,351
190,450
27
fastai/torch_core.py
15
7
def remove_module_load(state_dict): new_state_dict = OrderedDict() fo
Upgrading to support latest Pytorch version
remove_module_load
4fc3616712edb19179b17dd270ad6cf63abf99c2
DeOldify
torch_core.py
11
4
https://github.com/jantic/DeOldify.git
2
34
0
12
57
Python
{ "docstring": "create new OrderedDict that does not contain `module.`", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def remove_module_load(state_dict): new_state_dict = OrderedDict() for k, v in state_dict.items(): new_state_dict[k[7:]] = v return new_state_dict
@add_start_docstrings( "The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.", LAYOUTLMV3_START_DOCSTRING, )
6,073
33,182
54
src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py
31
9
def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) LAYOUTLMV3_START_DOCSTRING = r LAYOUTLMV3_INPUTS_DOCSTRING = r @add_start_docstrings( "The bare LayoutLMv3 Model transformer outputting raw hidden-states w
[LayoutLMv3] Add TensorFlow implementation (#18678) Co-authored-by: Esben Toke Christensen <esben.christensen@visma.com> Co-authored-by: Lasse Reedtz <lasse.reedtz@visma.com> Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Co-authored-by: Joao Gante <joaofranciscocardosogante@gmail.com>
serving
de8548ebf3242305d0f9792dacb6f86b196a3a33
transformers
modeling_tf_layoutlmv3.py
8
3
https://github.com/huggingface/transformers.git
1
23
1
28
67
Python
{ "docstring": "\n Method used for serving the model.\n\n Args:\n inputs (`Dict[str, tf.Tensor]`):\n The input of the saved model as a dictionary of tensors.\n \n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n <Tip>\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the\n tensors in the first argument of the model call function: `model(inputs)`.\n\n </Tip>\n\n Parameters:\n config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]\n token. See `pixel_values` for `patch_sequence_length`.\n\n Indices can be obtained using [`LayoutLMv3Tokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n\n bbox (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, 4)`, *optional*):\n Bounding boxes of each input sequence tokens. Selected in the range `[0,\n config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)\n format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,\n y1) represents the position of the lower right corner.\n\n Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]\n token. See `pixel_values` for `patch_sequence_length`.\n\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size,\n config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height /\n config.patch_size) * (width / config.patch_size))`.\n\n attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]\n token. See `pixel_values` for `patch_sequence_length`.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]\n token. See `pixel_values` for `patch_sequence_length`.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]\n token. See `pixel_values` for `patch_sequence_length`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n", "language": "en", "n_whitespaces": 1372, "n_words": 689, "vocab_size": 304 }
def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) LAYOUTLMV3_START_DOCSTRING = r LAYOUTLMV3_INPUTS_DOCSTRING = r @add_start_docstrings( "The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.", LAYOUTLMV3_START_DOCSTRING, )
107,305
308,556
40
homeassistant/components/sisyphus/media_player.py
8
9
def media_image_url(self): if self._table.active_track: return self._table.active_track.get_th
Sisyphus: Fix bad super call (#63327) Co-authored-by: Franck Nijhof <git@frenck.dev>
media_image_url
9f0805f51293851096d7ece48f48a041e4a809e0
core
media_player.py
11
4
https://github.com/home-assistant/core.git
2
34
0
7
57
Python
{ "docstring": "Return the URL for a thumbnail image of the current track.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def media_image_url(self): if self._table.active_track: return self._table.active_track.get_thumbnail_url(Track.ThumbnailSize.LARGE) return super().media_image_url
@pytest.mark.parametrize( "ongoing_requests", [[7, 1, 8, 4], [8, 1, 8, 4], [6, 1, 8, 4], [0, 1, 8, 4]])
28,953
129,433
385
python/ray/serve/tests/test_autoscaling_policy.py
107
26
def test_fluctuating_ongoing_requests(delay_s): config = AutoscalingConfig( min_replicas=1, max_replicas=10, target_num_ongoing_requests_per_replica=50, upscale_delay_s=delay_s, downscale_delay_s=delay_s) policy = BasicAutoscalingPolicy(config) if delay_s > 0: wait_periods = int(delay_s / CONTROL_LOOP_PERIOD_S) assert wait_periods > 1 underload_requests, overload_requests = [20, 20], [100] trials = 1000 new_num_replicas = None for trial in range(trials): if trial % 2 == 0: new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1) if delay_s > 0: assert new_num_replicas == 1, trial else: assert new_num_replicas == 2, trial else: new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=underload_requests, curr_target_num_replicas=2) if delay_s > 0: assert new_num_replicas == 2, trial else:
[Serve] Serve Autoscaling Release tests (#21208)
test_fluctuating_ongoing_requests
75b3080834bceb184e9ba19e21511eb0ea19955b
ray
test_autoscaling_policy.py
14
31
https://github.com/ray-project/ray.git
6
155
1
55
301
Python
{ "docstring": "\n Simulates a workload that switches between too many and too few\n ongoing requests.\n ", "language": "en", "n_whitespaces": 23, "n_words": 13, "vocab_size": 12 }
def test_fluctuating_ongoing_requests(delay_s): config = AutoscalingConfig( min_replicas=1, max_replicas=10, target_num_ongoing_requests_per_replica=50, upscale_delay_s=delay_s, downscale_delay_s=delay_s) policy = BasicAutoscalingPolicy(config) if delay_s > 0: wait_periods = int(delay_s / CONTROL_LOOP_PERIOD_S) assert wait_periods > 1 underload_requests, overload_requests = [20, 20], [100] trials = 1000 new_num_replicas = None for trial in range(trials): if trial % 2 == 0: new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1) if delay_s > 0: assert new_num_replicas == 1, trial else: assert new_num_replicas == 2, trial else: new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=underload_requests, curr_target_num_replicas=2) if delay_s > 0: assert new_num_replicas == 2, trial else: assert new_num_replicas == 1, trial @pytest.mark.parametrize( "ongoing_requests", [[7, 1, 8, 4], [8, 1, 8, 4], [6, 1, 8, 4], [0, 1, 8, 4]])
10,583
52,487
78
modules/audio/svs/diffsinger/utils/audio.py
47
7
def librosa_pad_lr(x, fsize, fshift, pad_sides=1):
Add Diffsinger Module (#2120) * add diffsinger * update README * update README
librosa_pad_lr
7eef3bfde63d03acbd1fc9a15a5e56bef47c0ef7
PaddleHub
audio.py
13
7
https://github.com/PaddlePaddle/PaddleHub.git
2
46
0
32
105
Python
{ "docstring": "compute right padding (final frame) or both sides padding (first and final frames)\n ", "language": "en", "n_whitespaces": 16, "n_words": 13, "vocab_size": 12 }
def librosa_pad_lr(x, fsize, fshift, pad_sides=1): assert pad_sides in (1, 2) # return int(fsize // 2) pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0] if pad_sides == 1: return 0, pad else: return pad // 2, pad // 2 + pad % 2 # Conversions
27,112
122,164
55
jax/tools/colab_tpu.py
37
14
def setup_tpu(tpu_driver_version='tpu_driver-0.2'): global TPU_DRIVER_MODE if not TPU_DRIVER_MODE: colab_tpu_addr = os.environ['COLAB_TPU_ADDR'].split(':')[0] url = f
Pin default jax.tools.colab_tpu.setup_tpu driver version. Prior to this change, we were defaulting to the TPU nightly driver version. We should instead pin to the version associated with the default jaxlib version that Colab uses.
setup_tpu
0cc4066bb7bf758a5ba8c5def9c2c32a1c98fb89
jax
colab_tpu.py
13
9
https://github.com/google/jax.git
2
64
0
32
125
Python
{ "docstring": "Sets up Colab to run on TPU.\n\n Note: make sure the Colab Runtime is set to Accelerator: TPU.\n\n Args\n ----\n tpu_driver_version : (str) specify the version identifier for the tpu driver.\n Defaults to \"tpu_driver-0.2\", which can be used with jaxlib 0.3.20. Set to\n \"tpu_driver_nightly\" to use the nightly tpu driver build.\n ", "language": "en", "n_whitespaces": 62, "n_words": 51, "vocab_size": 41 }
def setup_tpu(tpu_driver_version='tpu_driver-0.2'): global TPU_DRIVER_MODE if not TPU_DRIVER_MODE: colab_tpu_addr = os.environ['COLAB_TPU_ADDR'].split(':')[0] url = f'http://{colab_tpu_addr}:8475/requestversion/{tpu_driver_version}' requests.post(url) TPU_DRIVER_MODE = 1 # The following is required to use TPU Driver as JAX's backend. config.FLAGS.jax_xla_backend = "tpu_driver" config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
71,987
247,899
133
tests/storage/databases/main/test_lock.py
49
16
def test_timeout_lock(self): lock = self.get_success(self.store.try_acquire_lock("name", "key")) assert lock is not None self.get_success(lock.__aenter__()) # We simulate the process getting stuck by cancelling the looping call # that keeps the lock active.
Add type hints for `tests/unittest.py`. (#12347) In particular, add type hints for get_success and friends, which are then helpful in a bunch of places.
test_timeout_lock
f0b03186d96305fd44d74a89bf4230beec0c5c31
synapse
test_lock.py
11
9
https://github.com/matrix-org/synapse.git
1
95
0
38
166
Python
{ "docstring": "Test that we time out locks if they're not updated for ages", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def test_timeout_lock(self): lock = self.get_success(self.store.try_acquire_lock("name", "key")) assert lock is not None self.get_success(lock.__aenter__()) # We simulate the process getting stuck by cancelling the looping call # that keeps the lock active. lock._looping_call.stop() # Wait for the lock to timeout. self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000) lock2 = self.get_success(self.store.try_acquire_lock("name", "key")) self.assertIsNotNone(lock2) self.assertFalse(self.get_success(lock.is_still_valid()))
81,454
275,725
81
keras/preprocessing/image.py
33
11
def random_brightness(x, brightness_range, scale=True): if len(brightness_range) != 2: raise ValueError( "`brightness_range should be tuple or list of two floats. " "Received: %s" % (brightness_range,) ) u = np.random.uniform(brightness_range[0], bri
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
random_brightness
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
image.py
12
8
https://github.com/keras-team/keras.git
2
58
0
33
92
Python
{ "docstring": "Performs a random brightness shift.\n\n Deprecated: `tf.keras.preprocessing.image.random_brightness` does not operate\n on tensors and is not recommended for new code. Prefer\n `tf.keras.layers.RandomBrightness` which provides equivalent functionality as\n a preprocessing layer. For more information, see the tutorial for\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Args:\n x: Input tensor. Must be 3D.\n brightness_range: Tuple of floats; brightness range.\n scale: Whether to rescale the image such that minimum and maximum values\n are 0 and 255 respectively. Default: True.\n\n Returns:\n Numpy image tensor.\n\n Raises:\n ValueError if `brightness_range` isn't a tuple.\n ", "language": "en", "n_whitespaces": 172, "n_words": 90, "vocab_size": 77 }
def random_brightness(x, brightness_range, scale=True): if len(brightness_range) != 2: raise ValueError( "`brightness_range should be tuple or list of two floats. " "Received: %s" % (brightness_range,) ) u = np.random.uniform(brightness_range[0], brightness_range[1]) return apply_brightness_shift(x, u, scale)
107,962
309,255
23
tests/util/test_async.py
14
5
def test_check_loop_sync(caplog): hasync.check_loop() assert "Detected block
Warn on`time.sleep` in event loop (#63766) Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
test_check_loop_sync
dc58bc375ae203e3d394225f9c3a5a14d43cb2f3
core
test_async.py
7
3
https://github.com/home-assistant/core.git
1
18
0
14
34
Python
{ "docstring": "Test check_loop does nothing when called from thread.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_check_loop_sync(caplog): hasync.check_loop() assert "Detected blocking call inside the event loop" not in caplog.text
28,641
128,240
279
python/ray/serve/_private/deployment_state.py
70
20
def update(self) -> bool: try: # Add or remove DeploymentReplica instances in self._replicas. # This should be the only place we adjust total number of replicas # we manage. running_replicas_changed = self._scale_deployment_replicas() # Check the state of existing replicas and transition if necessary. running_replicas_changed |= self._check_and_update_replicas() if running_replicas_changed: self._notify_running_replicas_changed() deleted = self._check_curr_status() except Exception: self._curr_status_info = DeploymentStatusInfo( name=self._name, status=DeploymentStatus.UNHEALTHY,
[Serve] add alpha gRPC support (#28175)
update
65d0c0aa48be8f9f7faae857d3ab71444997755a
ray
deployment_state.py
17
24
https://github.com/ray-project/ray.git
3
72
0
56
138
Python
{ "docstring": "Attempts to reconcile this deployment to match its goal state.\n\n This is an asynchronous call; it's expected to be called repeatedly.\n\n Also updates the internal DeploymentStatusInfo based on the current\n state of the system.\n\n Returns true if this deployment was successfully deleted.\n ", "language": "en", "n_whitespaces": 77, "n_words": 42, "vocab_size": 36 }
def update(self) -> bool: try: # Add or remove DeploymentReplica instances in self._replicas. # This should be the only place we adjust total number of replicas # we manage. running_replicas_changed = self._scale_deployment_replicas() # Check the state of existing replicas and transition if necessary. running_replicas_changed |= self._check_and_update_replicas() if running_replicas_changed: self._notify_running_replicas_changed() deleted = self._check_curr_status() except Exception: self._curr_status_info = DeploymentStatusInfo( name=self._name, status=DeploymentStatus.UNHEALTHY, message="Failed to update deployment:" f"\n{traceback.format_exc()}", ) deleted = False return deleted
50,095
202,382
165
tests/csrf_tests/tests.py
48
18
def test_https_malformed_host(self): req = self._get_request(method="POST") req._is_secure_override = True req.META["HTTP_HOST"] = "@malformed" req.META["HTTP_REFERER"] = "https://www.evil.org/somepage" req.META["S
Refs #33476 -- Reformatted code with Black.
test_https_malformed_host
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
10
15
https://github.com/django/django.git
1
99
0
41
176
Python
{ "docstring": "\n CsrfViewMiddleware generates a 403 response if it receives an HTTPS\n request with a bad host.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
def test_https_malformed_host(self): req = self._get_request(method="POST") req._is_secure_override = True req.META["HTTP_HOST"] = "@malformed" req.META["HTTP_REFERER"] = "https://www.evil.org/somepage" req.META["SERVER_PORT"] = "443" mw = CsrfViewMiddleware(token_view) expected = ( "Referer checking failed - https://www.evil.org/somepage does not " "match any trusted origins." ) with self.assertRaisesMessage(RejectRequest, expected): mw._check_referer(req) response = mw.process_view(req, token_view, (), {}) self.assertEqual(response.status_code, 403)
594
3,894
63
airbyte-integrations/connectors/source-orb/source_orb/source.py
24
18
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]: # TODO: self.authenticator should optionally pull from sel
🎉 New Source: Orb (#9985) * V1 of source_orb connector * add boostrap.md file * add clause on Pagination to bootstrap.md * add SUMMARY documentation * add lookback_window_days connector parameter * Add support for start_date parameter * Add ability to transform record in order to un-nest IDs * Add support for extracting event properties based on connector configuration
stream_slices
1e0ac30ebdcfce55a5644bcd486044da45c93dd6
airbyte
source.py
12
11
https://github.com/airbytehq/airbyte.git
2
57
0
24
93
Python
{ "docstring": "\n This stream is sliced per `customer_id`. This has two implications:\n (1) State can be checkpointed after processing each slice\n (2) The other parameters (e.g. request_params, path) can be dependent on this slice.\n\n This allows us to pull data on a per customer_id basis, since that's what Orb exposes.\n ", "language": "en", "n_whitespaces": 84, "n_words": 48, "vocab_size": 42 }
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]: # TODO: self.authenticator should optionally pull from self._session.auth customers_stream = Customers(authenticator=self._session.auth) for customer in customers_stream.read_records(sync_mode=SyncMode.full_refresh): yield {"customer_id": customer["id"]}
82,567
278,476
526
keras/utils/metrics_utils.py
193
24
def ragged_assert_compatible_and_get_flat_values(values, mask=None): if isinstance(values, list): is_all_ragged = all(isinstance(rt, tf.RaggedTensor) for rt in values) is_any_ragged = any(isinstance(rt, tf.RaggedTensor) for rt in values) else: is_all_ragged = isinstance(values, tf.RaggedTensor) is_any_ragged = is_all_ragged if is_all_ragged and ((mask is None) or isinstance(mask, tf.RaggedTensor)): to_be_stri
resolve line-too-long in utils
ragged_assert_compatible_and_get_flat_values
80ee2fa4e1db2dda14370110830db82be3eb97b7
keras
metrics_utils.py
16
36
https://github.com/keras-team/keras.git
14
244
0
107
405
Python
{ "docstring": "If ragged, it checks the compatibility and then returns the flat_values.\n\n Note: If two tensors are dense, it does not check their compatibility.\n Note: Although two ragged tensors with different ragged ranks could have\n identical overall rank and dimension sizes and hence be compatible,\n we do not support those cases.\n Args:\n values: A list of potentially ragged tensor of the same ragged_rank.\n mask: A potentially ragged tensor of the same ragged_rank as elements in\n Values.\n\n Returns:\n A tuple in which the first element is the list of tensors and the second\n is the mask tensor. ([Values], mask). Mask and the element in Values\n are equal to the flat_values of the input arguments (if they were\n ragged).\n ", "language": "en", "n_whitespaces": 205, "n_words": 116, "vocab_size": 77 }
def ragged_assert_compatible_and_get_flat_values(values, mask=None): if isinstance(values, list): is_all_ragged = all(isinstance(rt, tf.RaggedTensor) for rt in values) is_any_ragged = any(isinstance(rt, tf.RaggedTensor) for rt in values) else: is_all_ragged = isinstance(values, tf.RaggedTensor) is_any_ragged = is_all_ragged if is_all_ragged and ((mask is None) or isinstance(mask, tf.RaggedTensor)): to_be_stripped = False if not isinstance(values, list): values = [values] to_be_stripped = True # NOTE: we leave the flat_values compatibility to # tf.TensorShape `assert_is_compatible_with` check if both dynamic # dimensions are equal and then use the flat_values. nested_row_split_list = [rt.nested_row_splits for rt in values] assertion_list = _assert_splits_match(nested_row_split_list) # if both are ragged sample_weights also should be ragged with same # dims. if isinstance(mask, tf.RaggedTensor): assertion_list_for_mask = _assert_splits_match( [nested_row_split_list[0], mask.nested_row_splits] ) with tf.control_dependencies(assertion_list_for_mask): mask = tf.expand_dims(mask.flat_values, -1) # values has at least 1 element. flat_values = [] for value in values: with tf.control_dependencies(assertion_list): flat_values.append(tf.expand_dims(value.flat_values, -1)) values = flat_values[0] if to_be_stripped else flat_values elif is_any_ragged: raise TypeError( "Some of the inputs are not tf.RaggedTensor. " f"Input received: {values}" ) # values are empty or value are not ragged and mask is ragged. elif isinstance(mask, tf.RaggedTensor): raise TypeError( "Ragged mask is not allowed with non-ragged inputs. " f"Input received: {values}, mask received: {mask}" ) return values, mask
2,286
12,428
124
jina/orchestrate/deployments/__init__.py
25
12
def update_sandbox_args(self): if self.is_sandbox: host, port = HubIO.deploy_public_sandbox(self.args) self._sandbox_deployed = True self.first_pod_args.host = host self.first_pod_args.port = port if self.head_args: self.pod_args['head'].host = h
fix: do not deploy sandbox on init (#4844)
update_sandbox_args
7c4c39a9d82c58ef2493c21a288c755901a9594e
jina
__init__.py
13
9
https://github.com/jina-ai/jina.git
3
67
0
16
112
Python
{ "docstring": "Update args of all its pods based on the host and port returned by Hubble", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 15 }
def update_sandbox_args(self): if self.is_sandbox: host, port = HubIO.deploy_public_sandbox(self.args) self._sandbox_deployed = True self.first_pod_args.host = host self.first_pod_args.port = port if self.head_args: self.pod_args['head'].host = host self.pod_args['head'].port = port
37,328
158,146
44
d2l/mxnet.py
21
5
def download_all(): for name in DATA_HUB: download(name) DATA_HUB['kaggle_house_train'] = ( DATA_URL + 'kaggle_house_pred_train.csv', '585e9cc93e70b39160e7921475f9bcd7d31219ce') DATA_HUB['kaggle_house_test'] = ( DATA_URL + 'kaggle_house
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <haizhou.sun@smartmore.com> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 
翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Correct a translation error. (#1091) * Correct a translation error. * Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) 
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <ubuntu@ip-172-31-12-66.us-west-2.compute.internal> Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: Aston Zhang <asv325@gmail.com> * 重复语句 (#1188) Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * 
test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <haizhou.sun@smartmore.com> Co-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com> Co-authored-by: Xinwei Liu <xinzone@outlook.com> Co-authored-by: Anirudh Dagar <anirudhdagar6@gmail.com> Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com> Co-authored-by: gyro永不抽风 <1247006353@qq.com> Co-authored-by: CanChengZheng <zcc550169544@163.com> Co-authored-by: linlin <jajupmochi@gmail.com> Co-authored-by: iuk <liukun0104@gmail.com> Co-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com> Co-authored-by: Mr. 
Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com> Co-authored-by: Chiyuan Fu <fuchiyuan2019@outlook.com> Co-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com> Co-authored-by: Haiker Sun <haizhou.uestc2011@gmail.com> Co-authored-by: Ming Liu <akira.liu@njnu.edu.cn> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: silenceZheng66 <13754430639@163.com> Co-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com> Co-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com> Co-authored-by: Krahets <krahets@163.com> Co-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com> Co-authored-by: Jameson <miraclecome@gmail.com> Co-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com> Co-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com> Co-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com> Co-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com> Co-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com> Co-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com> Co-authored-by: VigourJiang <jiangfuqiang154@163.com> Co-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com> Co-authored-by: LYF <27893441+liyufan@users.noreply.github.com> Co-authored-by: Aston Zhang <asv325@gmail.com> Co-authored-by: xiaotinghe <xiaotih@amazon.com> Co-authored-by: Ubuntu <ubuntu@ip-172-31-12-66.us-west-2.compute.internal> Co-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com> Co-authored-by: HinGwenWoong <peterhuang0323@qq.com> Co-authored-by: Shuai Zhang <cheungdaven@gmail.com>
download_all
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
d2l-zh
mxnet.py
9
3
https://github.com/d2l-ai/d2l-zh.git
2
14
0
17
72
Python
{ "docstring": "Download all files in the DATA_HUB.\n\n Defined in :numref:`sec_kaggle_house`", "language": "en", "n_whitespaces": 11, "n_words": 9, "vocab_size": 8 }
def download_all(): for name in DATA_HUB: download(name) DATA_HUB['kaggle_house_train'] = ( DATA_URL + 'kaggle_house_pred_train.csv', '585e9cc93e70b39160e7921475f9bcd7d31219ce') DATA_HUB['kaggle_house_test'] = ( DATA_URL + 'kaggle_house_pred_test.csv', 'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
55,519
218,873
46
python3.10.4/Lib/lib2to3/pytree.py
14
5
def generate_matches(self, nodes): r = {} if nodes and self.match(nodes[0], r): yield 1, r
add python 3.10.4 for windows
generate_matches
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
pytree.py
9
4
https://github.com/XX-net/XX-Net.git
3
31
0
13
51
Python
{ "docstring": "\n Generator yielding all matches for this pattern.\n\n Default implementation for non-wildcard patterns.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 11 }
def generate_matches(self, nodes): r = {} if nodes and self.match(nodes[0], r): yield 1, r
15,953
73,139
31
wagtail/contrib/modeladmin/helpers/permission.py
10
7
def user_can_delete_obj(self, user, obj): perm_codenam
Reformat with black
user_can_delete_obj
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
permission.py
9
3
https://github.com/wagtail/wagtail.git
1
27
0
10
45
Python
{ "docstring": "\n Return a boolean to indicate whether `user` is permitted to 'delete'\n a specific `self.model` instance.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 13 }
def user_can_delete_obj(self, user, obj): perm_codename = self.get_perm_codename("delete") return self.user_has_specific_permission(user, perm_codename)
20,823
101,409
69
tools/preview/preview.py
18
9
def _busy_indicator_trace(self, *args) -> None: logger.trace("Busy indicator trace: %s", args) # type: ignor
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
_busy_indicator_trace
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
faceswap
preview.py
10
13
https://github.com/deepfakes/faceswap.git
2
40
0
18
72
Python
{ "docstring": " Show or hide busy indicator based on whether the preview is updating.\n\n Parameters\n ----------\n args: unused\n Required for tkinter event, but unused\n ", "language": "en", "n_whitespaces": 62, "n_words": 22, "vocab_size": 21 }
def _busy_indicator_trace(self, *args) -> None: logger.trace("Busy indicator trace: %s", args) # type: ignore if self._busy_tkvar.get(): self._start_busy_indicator() else: self._stop_busy_indicator()
46,056
189,448
356
manim/mobject/svg/code_mobject.py
41
20
def _gen_html_string(self): self.html_string = _hilite_me( self.code_string, self.language, self.style, self.insert_line_no, "border:solid gray;bor
Hide more private methods from the docs. (#2468) * hide privs from text_mobject.py * hide privs from tex_mobject.py * hide privs from code_mobject.py * hide privs from svg_mobject.py * remove SVGPath and utils from __init__.py * don't import string_to_numbers * hide privs from geometry.py * hide privs from matrix.py * hide privs from numbers.py * hide privs from three_dimensions.py * forgot underscore under set_stroke_width_from_length * there were more i missed * unhidea method that was used in docs * forgot other text2hash * remove svg_path from docs
gen_html_string
902e7eb4f0147b5882a613b67467e38a1d47f01e
manim
code_mobject.py
16
25
https://github.com/ManimCommunity/manim.git
2
103
0
37
170
Python
{ "docstring": "Function to generate html string with code highlighted and stores in variable html_string.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
def _gen_html_string(self): self.html_string = _hilite_me( self.code_string, self.language, self.style, self.insert_line_no, "border:solid gray;border-width:.1em .1em .1em .8em;padding:.2em .6em;", self.file_path, self.line_no_from, ) if self.generate_html_file: os.makedirs( os.path.join("assets", "codes", "generated_html_files"), exist_ok=True, ) with open( os.path.join( "assets", "codes", "generated_html_files", self.file_name + ".html", ), "w", ) as file: file.write(self.html_string)
4,155
22,074
49
pipenv/patched/pip/_vendor/requests/cookies.py
14
6
def __getstate__(self): state = self.__di
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
__getstate__
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
pipenv
cookies.py
9
4
https://github.com/pypa/pipenv.git
1
23
0
13
44
Python
{ "docstring": "Unlike a normal CookieJar, this class is pickleable.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def __getstate__(self): state = self.__dict__.copy() # remove the unpickleable RLock object state.pop("_cookies_lock") return state