The columns below describe the dataset schema; for `stringlengths` columns the min/max are string lengths, for `int64` columns they are value ranges.

| feature | dtype | min | max |
| --- | --- | --- | --- |
| ast_errors | stringlengths | 0 | 3.2k |
| d_id | int64 | 44 | 121k |
| id | int64 | 70 | 338k |
| n_whitespaces | int64 | 3 | 14k |
| path | stringlengths | 8 | 134 |
| n_words | int64 | 4 | 4.82k |
| n_identifiers | int64 | 1 | 131 |
| random_cut | stringlengths | 16 | 15.8k |
| commit_message | stringlengths | 2 | 15.3k |
| fun_name | stringlengths | 1 | 84 |
| commit_id | stringlengths | 40 | 40 |
| repo | stringlengths | 3 | 28 |
| file_name | stringlengths | 5 | 79 |
| ast_levels | int64 | 6 | 31 |
| nloc | int64 | 1 | 548 |
| url | stringlengths | 31 | 59 |
| complexity | int64 | 1 | 66 |
| token_counts | int64 | 6 | 2.13k |
| n_ast_errors | int64 | 0 | 28 |
| vocab_size | int64 | 4 | 1.11k |
| n_ast_nodes | int64 | 15 | 19.2k |
| language | stringclasses | 1 value | — |
| documentation | dict | — | — |
| code | stringlengths | 101 | 62.2k |

Example rows (one record per `---` block; `documentation` holds the extracted docstring and its statistics):
---
d_id: 5,241 · id: 29,606 · n_whitespaces: 140
path: saleor/plugins/base_plugin.py
n_words: 39 · n_identifiers: 10
random_cut: only the `def` line of the code field below
commit_message: Fix plugin configuration (#11278) * Fix updating plugin configuration * Fix failing tax migration
fun_name: _clean_configuration_value
commit_id: eac1ae9cf107b8b0189b8b21ff6668c4131c6a00
repo: saleor · file_name: base_plugin.py
ast_levels: 11 · nloc: 10
url: https://github.com/saleor/saleor.git
complexity: 5 · token_counts: 48 · n_ast_errors: 0 · vocab_size: 31 · n_ast_nodes: 80
language: Python
documentation:
{ "docstring": "Clean the value that is saved in plugin configuration.\n\n Change the string provided as boolean into the bool value.\n Return None for Output type, as it's read only field.\n ", "language": "en", "n_whitespaces": 50, "n_words": 29, "vocab_size": 26 }
code:
```python
def _clean_configuration_value(cls, item_type, new_value):
    if (
        item_type == ConfigurationTypeField.BOOLEAN
        and new_value
        and not isinstance(new_value, bool)
    ):
        new_value = new_value.lower() == "true"
    if item_type == ConfigurationTypeField.OUTPUT:
        # OUTPUT field is read only. No need to update it
        return
    return new_value
```
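A minimal standalone sketch of the coercion rule this method implements; the `ConfigType` enum and `clean_value` helper below are hypothetical stand-ins for Saleor's `ConfigurationTypeField` and the classmethod above:

```python
from enum import Enum


class ConfigType(Enum):  # hypothetical stand-in for ConfigurationTypeField
    BOOLEAN = "Boolean"
    OUTPUT = "Output"


def clean_value(item_type, new_value):
    # "true"/"false" strings arriving from forms are coerced to real booleans.
    if item_type == ConfigType.BOOLEAN and new_value and not isinstance(new_value, bool):
        new_value = new_value.lower() == "true"
    if item_type == ConfigType.OUTPUT:
        return None  # OUTPUT is read-only, so updates are dropped
    return new_value


assert clean_value(ConfigType.BOOLEAN, "True") is True
assert clean_value(ConfigType.BOOLEAN, "false") is False
assert clean_value(ConfigType.OUTPUT, "anything") is None
```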
---
d_id: 12,025 · id: 60,232 · n_whitespaces: 62
path: code/deep/BJMMD/caffe/python/caffe/coord_map.py
n_words: 47 · n_identifiers: 3
random_cut: a verbatim prefix of the code field below, cut off inside the final comment
commit_message: Balanced joint maximum mean discrepancy for deep transfer learning
fun_name: coord_map_from_to
commit_id: cc4d0564756ca067516f71718a3d135996525909
repo: transferlearning · file_name: coord_map.py
ast_levels: 6 · nloc: 28
url: https://github.com/jindongwang/transferlearning.git
complexity: 8 · token_counts: 177 · n_ast_errors: 0 · vocab_size: 42 · n_ast_nodes: 19
language: Python
documentation:
{ "docstring": "\n Determine the coordinate mapping betweeen a top (from) and a top (to).\n Walk the graph to find a common ancestor while composing the coord maps for\n from and to until they meet. As a last step the from map is inverted.\n ", "language": "en", "n_whitespaces": 54, "n_words": 41, "vocab_size": 31 }
code (truncated):
```python
def coord_map_from_to(top_from, top_to):
    # We need to find a common ancestor of top_from and top_to.
    # We'll assume that all ancestors are equivalent here (otherwise the graph
    # is an inconsistent state (which we could improve this to check for)).
    # For now use a brute-force algorithm.
```
---
d_id: 13,985 · id: 65,678 · n_whitespaces: 19
path: erpnext/controllers/stock_controller.py
n_words: 31 · n_identifiers: 16
random_cut: a verbatim prefix of the code field below, cut off at `warehouse_ite`
commit_message: style: format code with black
fun_name: get_conditions_to_validate_future_sle
commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b
repo: erpnext · file_name: stock_controller.py
ast_levels: 16 · nloc: 13
url: https://github.com/frappe/erpnext.git
complexity: 4 · token_counts: 69 · n_ast_errors: 0 · vocab_size: 25 · n_ast_nodes: 155
language: Python
documentation:
{ "docstring": "warehouse = {frappe.db.escape(warehouse)}\n\t\t\t\tand item_code in ({', '.join(frappe.db.escape(item) for item in items)})", "language": "en", "n_whitespaces": 10, "n_words": 12, "vocab_size": 11 }
code:
```python
def get_conditions_to_validate_future_sle(sl_entries):
    warehouse_items_map = {}
    for entry in sl_entries:
        if entry.warehouse not in warehouse_items_map:
            warehouse_items_map[entry.warehouse] = set()
        warehouse_items_map[entry.warehouse].add(entry.item_code)

    or_conditions = []
    for warehouse, items in warehouse_items_map.items():
        or_conditions.append(
            f
        )

    return or_conditions
```
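The dangling `f` in the append above is a dataset artifact: the f-string literal was extracted into this record's documentation field. A hedged reconstruction that splices it back, with `frappe.db.escape` stubbed so the snippet runs standalone:

```python
# Sketch only: the f-string body comes from the record's documentation field,
# and escape() is a stand-in for frappe.db.escape.
def escape(value):
    return "'" + str(value).replace("'", "''") + "'"


warehouse_items_map = {"Stores - F": {"ITEM-001", "ITEM-002"}}
or_conditions = []
for warehouse, items in warehouse_items_map.items():
    or_conditions.append(
        f"""warehouse = {escape(warehouse)}
        and item_code in ({', '.join(escape(item) for item in items)})"""
    )
print(or_conditions[0])
```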
---
d_id: 77,180 · id: 262,317 · n_whitespaces: 324
path: TTS/tts/datasets/__init__.py
n_words: 118 · n_identifiers: 23
random_cut: a verbatim prefix of the code field below, cut off at `np.random.seed(`
commit_message: Make style and lint
fun_name: split_dataset
commit_id: 1425a023fe4bc6bda8578295aeeeb02af78cc082
repo: TTS · file_name: __init__.py
ast_levels: 18 · nloc: 30
url: https://github.com/coqui-ai/TTS.git
complexity: 8 · token_counts: 219 · n_ast_errors: 0 · vocab_size: 82 · n_ast_nodes: 347
language: Python
documentation (note: the stored docstring still contains unresolved merge-conflict markers):
{ "docstring": "Split a dataset into train and eval. Consider speaker distribution in multi-speaker training.\n\n Args:\n <<<<<<< HEAD\n items (List[List]):\n A list of samples. Each sample is a list of `[audio_path, text, speaker_id]`.\n\n eval_split_max_size (int):\n Number maximum of samples to be used for evaluation in proportion split. Defaults to None (Disabled).\n\n eval_split_size (float):\n If between 0.0 and 1.0 represents the proportion of the dataset to include in the evaluation set.\n If > 1, represents the absolute number of evaluation samples. Defaults to 0.01 (1%).\n =======\n items (List[List]): A list of samples. Each sample is a list of `[text, audio_path, speaker_id]`.\n >>>>>>> Fix docstring\n ", "language": "en", "n_whitespaces": 224, "n_words": 101, "vocab_size": 65 }
code:
```python
def split_dataset(items, eval_split_max_size=None, eval_split_size=0.01):
    speakers = [item["speaker_name"] for item in items]
    is_multi_speaker = len(set(speakers)) > 1
    if eval_split_size > 1:
        eval_split_size = int(eval_split_size)
    else:
        if eval_split_max_size:
            eval_split_size = min(eval_split_max_size, int(len(items) * eval_split_size))
        else:
            eval_split_size = int(len(items) * eval_split_size)

    assert (
        eval_split_size > 0
    ), " [!] You do not have enough samples for the evaluation set. You can work around this setting the 'eval_split_size' parameter to a minimum of {}".format(
        1 / len(items)
    )
    np.random.seed(0)
    np.random.shuffle(items)
    if is_multi_speaker:
        items_eval = []
        speakers = [item["speaker_name"] for item in items]
        speaker_counter = Counter(speakers)
        while len(items_eval) < eval_split_size:
            item_idx = np.random.randint(0, len(items))
            speaker_to_be_removed = items[item_idx]["speaker_name"]
            if speaker_counter[speaker_to_be_removed] > 1:
                items_eval.append(items[item_idx])
                speaker_counter[speaker_to_be_removed] -= 1
                del items[item_idx]
        return items_eval, items
    return items[:eval_split_size], items[eval_split_size:]
```
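A hedged usage sketch for `split_dataset`; the `items` list below is made up, and only the `speaker_name` key matters to the speaker-balancing branch:

```python
# Hypothetical sample list: 200 utterances across 4 speakers. split_dataset
# (defined above) is assumed importable along with its numpy/Counter imports.
items = [
    {"text": f"utterance {i}", "audio_file": f"clip_{i}.wav", "speaker_name": f"spk{i % 4}"}
    for i in range(200)
]
eval_items, train_items = split_dataset(items, eval_split_size=0.1)
print(len(eval_items), len(train_items))  # 20 180
```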
---
d_id: 71,979 · id: 247,891 · n_whitespaces: 273
path: tests/rest/admin/test_media.py
n_words: 67 · n_identifiers: 21
random_cut: identical to the code field below
commit_message: Add type hints for `tests/unittest.py`. (#12347) In particular, add type hints for get_success and friends, which are then helpful in a bunch of places.
fun_name: test_quarantine_media
commit_id: f0b03186d96305fd44d74a89bf4230beec0c5c31
repo: synapse · file_name: test_media.py
ast_levels: 11 · nloc: 27
url: https://github.com/matrix-org/synapse.git
complexity: 1 · token_counts: 215 · n_ast_errors: 0 · vocab_size: 33 · n_ast_nodes: 340
language: Python
documentation:
{ "docstring": "\n Tests that quarantining and remove from quarantine a media is successfully\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
code:
```python
def test_quarantine_media(self) -> None:
    media_info = self.get_success(self.store.get_local_media(self.media_id))
    assert media_info is not None
    self.assertFalse(media_info["quarantined_by"])

    # quarantining
    channel = self.make_request(
        "POST",
        self.url % ("quarantine", self.server_name, self.media_id),
        access_token=self.admin_user_tok,
    )
    self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
    self.assertFalse(channel.json_body)

    media_info = self.get_success(self.store.get_local_media(self.media_id))
    assert media_info is not None
    self.assertTrue(media_info["quarantined_by"])

    # remove from quarantine
    channel = self.make_request(
        "POST",
        self.url % ("unquarantine", self.server_name, self.media_id),
        access_token=self.admin_user_tok,
    )
    self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
    self.assertFalse(channel.json_body)

    media_info = self.get_success(self.store.get_local_media(self.media_id))
    assert media_info is not None
    self.assertFalse(media_info["quarantined_by"])
```
---
d_id: 41,794 · id: 176,254 · n_whitespaces: 804
path: networkx/algorithms/community/modularity_max.py
n_words: 250 · n_identifiers: 29
random_cut: a verbatim prefix of the code field below, cut off at `elif to_merge and min(i, j) < min(to_merge[0], to_merge[1]):`
commit_message: Add weights to karate club graph (#5285) Add weights to the karate_club_graph. Modifies `non_randomness` and `naive_greedy_modularity_communities` to accept a `weight` parameter and modifies tests that use the kcg accordingly Co-authored-by: Kevin Berry <kevin.berry@worthix.com> Co-authored-by: Dan Schult <dschult@colgate.edu>
fun_name: naive_greedy_modularity_communities
commit_id: 290ebce534b84f9db20ec58b98cbb170e65a0ba1
repo: networkx · file_name: modularity_max.py
ast_levels: 19 · nloc: 80
url: https://github.com/networkx/networkx.git
complexity: 16 · token_counts: 301 · n_ast_errors: 0 · vocab_size: 136 · n_ast_nodes: 472
language: Python
documentation:
{ "docstring": "Find communities in G using greedy modularity maximization.\n\n This implementation is O(n^4), much slower than alternatives, but it is\n provided as an easy-to-understand reference implementation.\n\n Greedy modularity maximization begins with each node in its own community\n and joins the pair of communities that most increases modularity until no\n such pair exists.\n\n This function maximizes the generalized modularity, where `resolution`\n is the resolution parameter, often expressed as $\\gamma$.\n See :func:`~networkx.algorithms.community.quality.modularity`.\n\n Parameters\n ----------\n G : NetworkX graph\n\n resolution : float (default=1)\n If resolution is less than 1, modularity favors larger communities.\n Greater than 1 favors smaller communities.\n\n weight : string or None, optional (default=None)\n The name of an edge attribute that holds the numerical value used\n as a weight. If None, then each edge has weight 1.\n The degree is the sum of the edge weights adjacent to the node.\n\n Returns\n -------\n list\n A list of sets of nodes, one for each community.\n Sorted by length with largest communities first.\n\n Examples\n --------\n >>> from networkx.algorithms.community import \\\n ... naive_greedy_modularity_communities\n >>> G = nx.karate_club_graph()\n >>> c = naive_greedy_modularity_communities(G)\n >>> sorted(c[0])\n [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]\n\n See Also\n --------\n greedy_modularity_communities\n modularity\n ", "language": "en", "n_whitespaces": 336, "n_words": 199, "vocab_size": 146 }
code (the bare `r` after the signature is the raw-string prefix left behind when the docstring was stripped):
```python
def naive_greedy_modularity_communities(G, resolution=1, weight=None):
    r
    # First create one community for each node
    communities = list(frozenset([u]) for u in G.nodes())
    # Track merges
    merges = []
    # Greedily merge communities until no improvement is possible
    old_modularity = None
    new_modularity = modularity(G, communities, resolution=resolution, weight=weight)
    while old_modularity is None or new_modularity > old_modularity:
        # Save modularity for comparison
        old_modularity = new_modularity
        # Find best pair to merge
        trial_communities = list(communities)
        to_merge = None
        for i, u in enumerate(communities):
            for j, v in enumerate(communities):
                # Skip i==j and empty communities
                if j <= i or len(u) == 0 or len(v) == 0:
                    continue
                # Merge communities u and v
                trial_communities[j] = u | v
                trial_communities[i] = frozenset([])
                trial_modularity = modularity(
                    G, trial_communities, resolution=resolution, weight=weight
                )
                if trial_modularity >= new_modularity:
                    # Check if strictly better or tie
                    if trial_modularity > new_modularity:
                        # Found new best, save modularity and group indexes
                        new_modularity = trial_modularity
                        to_merge = (i, j, new_modularity - old_modularity)
                    elif to_merge and min(i, j) < min(to_merge[0], to_merge[1]):
                        # Break ties by choosing pair with lowest min id
                        new_modularity = trial_modularity
                        to_merge = (i, j, new_modularity - old_modularity)
                # Un-merge
                trial_communities[i] = u
                trial_communities[j] = v
        if to_merge is not None:
            # If the best merge improves modularity, use it
            merges.append(to_merge)
            i, j, dq = to_merge
            u, v = communities[i], communities[j]
            communities[j] = u | v
            communities[i] = frozenset([])
    # Remove empty communities and sort
    return sorted((c for c in communities if len(c) > 0), key=len, reverse=True)


# old name
_naive_greedy_modularity_communities = naive_greedy_modularity_communities
```
---
d_id: 7,717 · id: 42,747 · n_whitespaces: 871
path: airflow/providers/microsoft/psrp/hooks/psrp.py
n_words: 167 · n_identifiers: 43
random_cut: a verbatim prefix of the code field below, cut off at `ps.poll_invoke(timeout=self._operation_timeout)`
commit_message: Ensure @contextmanager decorates generator func (#23103)
fun_name: invoke
commit_id: e58985598f202395098e15b686aec33645a906ff
repo: airflow · file_name: psrp.py
ast_levels: 19 · nloc: 45
url: https://github.com/apache/airflow.git
complexity: 11 · token_counts: 264 · n_ast_errors: 0 · vocab_size: 116 · n_ast_nodes: 420
language: Python
documentation:
{ "docstring": "\n Context manager that yields a PowerShell object to which commands can be\n added. Upon exit, the commands will be invoked.\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 18 }
code:
```python
def invoke(self) -> Generator[PowerShell, None, None]:
    logger = copy(self.log)
    logger.setLevel(self._logging_level)
    local_context = self._conn is None
    if local_context:
        self.__enter__()
    try:
        assert self._conn is not None
        ps = PowerShell(self._conn)
        yield ps
        ps.begin_invoke()

        streams = [
            ps.output,
            ps.streams.debug,
            ps.streams.error,
            ps.streams.information,
            ps.streams.progress,
            ps.streams.verbose,
            ps.streams.warning,
        ]
        offsets = [0 for _ in streams]

        # We're using polling to make sure output and streams are
        # handled while the process is running.
        while ps.state == PSInvocationState.RUNNING:
            ps.poll_invoke(timeout=self._operation_timeout)

            for i, stream in enumerate(streams):
                offset = offsets[i]
                while len(stream) > offset:
                    record = stream[offset]

                    # Records received on the output stream during job
                    # status polling are handled via an optional callback,
                    # while the other streams are simply logged.
                    if stream is ps.output:
                        if self._on_output_callback is not None:
                            self._on_output_callback(record)
                    else:
                        self._log_record(logger.log, record)
                    offset += 1
                offsets[i] = offset

        # For good measure, we'll make sure the process has
        # stopped running in any case.
        ps.end_invoke()

        self.log.info("Invocation state: %s", str(PSInvocationState(ps.state)))
        if ps.streams.error:
            raise AirflowException("Process had one or more errors")
    finally:
        if local_context:
            self.__exit__(None, None, None)
```
---
d_id: 117,210 · id: 320,536 · n_whitespaces: 38
path: src/documents/tests/test_task_signals.py
n_words: 10 · n_identifiers: 12
random_cut: a verbatim prefix of the code field below, cut off before the final assertion
commit_message: Switches task serialization over to pickle format
fun_name: util_call_before_task_publish_handler
commit_id: 97d6503fefc5737028637c39a2c1f33dd1e12904
repo: paperless-ngx · file_name: test_task_signals.py
ast_levels: 12 · nloc: 4
url: https://github.com/paperless-ngx/paperless-ngx.git
complexity: 1 · token_counts: 56 · n_ast_errors: 0 · vocab_size: 9 · n_ast_nodes: 90
language: Python
documentation:
{ "docstring": "\n Simple utility to call the pre-run handle and ensure it created a single task\n instance\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
code:
```python
def util_call_before_task_publish_handler(self, headers_to_use, body_to_use):
    self.assertEqual(PaperlessTask.objects.all().count(), 0)

    before_task_publish_handler(headers=headers_to_use, body=body_to_use)

    self.assertEqual(PaperlessTask.objects.all().count(), 1)
```
---
d_id: 15,807 · id: 71,963 · n_whitespaces: 84
path: wagtail/admin/tests/test_edit_handlers.py
n_words: 21 · n_identifiers: 15
random_cut: a verbatim prefix of the code field below, cut off at `self.asse`
commit_message: Reformat with black
fun_name: test_form
commit_id: d10f15e55806c6944827d801cd9c2d53f5da4186
repo: wagtail · file_name: test_edit_handlers.py
ast_levels: 10 · nloc: 9
url: https://github.com/wagtail/wagtail.git
complexity: 1 · token_counts: 109 · n_ast_errors: 0 · vocab_size: 18 · n_ast_nodes: 174
language: Python
documentation:
{ "docstring": "\n Check that the form has the comments/replies formsets, and that the\n user has been set on each CommentForm/CommentReplyForm subclass\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 15 }
code:
```python
def test_form(self):
    form = self.EventPageForm(instance=self.event_page)
    self.assertIn("comments", form.formsets)

    comments_formset = form.formsets["comments"]
    self.assertEqual(len(comments_formset.forms), 1)
    self.assertEqual(comments_formset.forms[0].user, self.commenting_user)

    replies_formset = comments_formset.forms[0].formsets["replies"]
    self.assertEqual(len(replies_formset.forms), 2)
    self.assertEqual(replies_formset.forms[0].user, self.commenting_user)
```
---
d_id: 7,629 · id: 42,569 · n_whitespaces: 180
path: nltk/corpus/reader/wordnet.py
n_words: 38 · n_identifiers: 15
random_cut: a verbatim prefix of the code field below, cut off at `return corpus._lang_data[lang][i][`
commit_message: Fix wordnet's all_synsets() function (#3078) * Fix all_synsets() function * Add simple regression tests for #3077 * Add suggestions by @tomaarsen Co-authored-by: Tom Aarsen <Cubiegamedev@gmail.com>
fun_name: _doc
commit_id: 3ca43e26efd7d5aa37b3cd79446258d8bfa79561
repo: nltk · file_name: wordnet.py
ast_levels: 14 · nloc: 14
url: https://github.com/nltk/nltk.git
complexity: 4 · token_counts: 94 · n_ast_errors: 0 · vocab_size: 27 · n_ast_nodes: 151
language: Python
documentation:
{ "docstring": "Helper method for Synset.definition and Synset.examples", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
code:
```python
def _doc(self, doc_type, default, lang="eng"):
    corpus = self._wordnet_corpus_reader
    if lang not in corpus.langs():
        return None
    elif lang == "eng":
        return default
    else:
        corpus._load_lang_data(lang)
        of = corpus.ss2of(self)
        i = corpus.lg_attrs.index(doc_type)
        if of in corpus._lang_data[lang][i]:
            return corpus._lang_data[lang][i][of]
        else:
            return None
```
---
d_id: 39,392 · id: 163,184 · n_whitespaces: 221
path: pandas/core/arrays/categorical.py
n_words: 73 · n_identifiers: 30
random_cut: a verbatim prefix of the code field below, cut off at `except ValueE`
commit_message: DOC: Improve doc summaries in series.rst (#45237)
fun_name: map
commit_id: 521259299f7829da667ba39302ec77acedde9e5e
repo: pandas · file_name: categorical.py
ast_levels: 15 · nloc: 10
url: https://github.com/pandas-dev/pandas.git
complexity: 3 · token_counts: 85 · n_ast_errors: 0 · vocab_size: 57 · n_ast_nodes: 216
language: Python
documentation:
{ "docstring": "\n Map categories using an input mapping or function.\n\n Maps the categories to new categories. If the mapping correspondence is\n one-to-one the result is a :class:`~pandas.Categorical` which has the\n same order property as the original, otherwise a :class:`~pandas.Index`\n is returned. NaN values are unaffected.\n\n If a `dict` or :class:`~pandas.Series` is used any unmapped category is\n mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`\n will be returned.\n\n Parameters\n ----------\n mapper : function, dict, or Series\n Mapping correspondence.\n\n Returns\n -------\n pandas.Categorical or pandas.Index\n Mapped categorical.\n\n See Also\n --------\n CategoricalIndex.map : Apply a mapping correspondence on a\n :class:`~pandas.CategoricalIndex`.\n Index.map : Apply a mapping correspondence on an\n :class:`~pandas.Index`.\n Series.map : Apply a mapping correspondence on a\n :class:`~pandas.Series`.\n Series.apply : Apply more complex functions on a\n :class:`~pandas.Series`.\n\n Examples\n --------\n >>> cat = pd.Categorical(['a', 'b', 'c'])\n >>> cat\n ['a', 'b', 'c']\n Categories (3, object): ['a', 'b', 'c']\n >>> cat.map(lambda x: x.upper())\n ['A', 'B', 'C']\n Categories (3, object): ['A', 'B', 'C']\n >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})\n ['first', 'second', 'third']\n Categories (3, object): ['first', 'second', 'third']\n\n If the mapping is one-to-one the ordering of the categories is\n preserved:\n\n >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)\n >>> cat\n ['a', 'b', 'c']\n Categories (3, object): ['a' < 'b' < 'c']\n >>> cat.map({'a': 3, 'b': 2, 'c': 1})\n [3, 2, 1]\n Categories (3, int64): [3 < 2 < 1]\n\n If the mapping is not one-to-one an :class:`~pandas.Index` is returned:\n\n >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})\n Index(['first', 'second', 'first'], dtype='object')\n\n If a `dict` is used, all unmapped categories are mapped to `NaN` and\n the result is an :class:`~pandas.Index`:\n\n >>> cat.map({'a': 'first', 'b': 'second'})\n Index(['first', 'second', nan], dtype='object')\n ", "language": "en", "n_whitespaces": 679, "n_words": 269, "vocab_size": 134 }
code (the trailing comparison-operator assignments are module context captured after the method):
```python
def map(self, mapper):
    new_categories = self.categories.map(mapper)
    try:
        return self.from_codes(
            self._codes.copy(), categories=new_categories, ordered=self.ordered
        )
    except ValueError:
        # NA values are represented in self._codes with -1
        # np.take causes NA values to take final element in new_categories
        if np.any(self._codes == -1):
            new_categories = new_categories.insert(len(new_categories), np.nan)
        return np.take(new_categories, self._codes)


__eq__ = _cat_compare_op(operator.eq)
__ne__ = _cat_compare_op(operator.ne)
__lt__ = _cat_compare_op(operator.lt)
__gt__ = _cat_compare_op(operator.gt)
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)

# -------------------------------------------------------------
# Validators; ideally these can be de-duplicated
```
---
d_id: 36,295 · id: 155,204 · n_whitespaces: 91
path: modin/experimental/core/execution/unidist/implementations/pandas_on_unidist/io/io.py
n_words: 26 · n_identifiers: 11
random_cut: a verbatim prefix of the code field below, cut off at `return PandasO`
commit_message: FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059) Signed-off-by: Igoshev, Iaroslav <iaroslav.igoshev@intel.com>
fun_name: to_pickle_distributed
commit_id: 193505fdf0c984743397ba3df56262f30aee13a8
repo: modin · file_name: io.py
ast_levels: 13 · nloc: 12
url: https://github.com/modin-project/modin.git
complexity: 4 · token_counts: 93 · n_ast_errors: 0 · vocab_size: 25 · n_ast_nodes: 95
language: Python
documentation:
{ "docstring": "\n When `*` in the filename all partitions are written to their own separate file.\n\n The filenames is determined as follows:\n - if `*` in the filename then it will be replaced by the increasing sequence 0, 1, 2, …\n - if `*` is not the filename, then will be used default implementation.\n\n Examples #1: 4 partitions and input filename=\"partition*.pkl.gz\", then filenames will be:\n `partition0.pkl.gz`, `partition1.pkl.gz`, `partition2.pkl.gz`, `partition3.pkl.gz`.\n\n Parameters\n ----------\n qc : BaseQueryCompiler\n The query compiler of the Modin dataframe that we want\n to run ``to_pickle_distributed`` on.\n **kwargs : dict\n Parameters for ``pandas.to_pickle(**kwargs)``.\n ", "language": "en", "n_whitespaces": 203, "n_words": 92, "vocab_size": 70 }
code:
```python
def to_pickle_distributed(cls, qc, **kwargs):
    if not (
        isinstance(kwargs["filepath_or_buffer"], str)
        and "*" in kwargs["filepath_or_buffer"]
    ) or not isinstance(qc, PandasQueryCompiler):
        warnings.warn("Defaulting to Modin core implementation")
        return PandasOnUnidistIO.to_pickle(qc, **kwargs)
```
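The `*` fan-out the docstring describes is plain filename substitution; a standalone sketch of the rule (the partition count is hypothetical):

```python
filepath = "partition*.pkl.gz"
n_partitions = 4  # hypothetical number of dataframe partitions
filenames = [filepath.replace("*", str(i)) for i in range(n_partitions)]
print(filenames)
# ['partition0.pkl.gz', 'partition1.pkl.gz', 'partition2.pkl.gz', 'partition3.pkl.gz']
```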
---
d_id: 51,080 · id: 205,304 · n_whitespaces: 42
path: django/db/migrations/loader.py
n_words: 10 · n_identifiers: 8
random_cut: identical to the code field below
commit_message: Refs #33476 -- Reformatted code with Black.
fun_name: project_state
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00
repo: django · file_name: loader.py
ast_levels: 9 · nloc: 4
url: https://github.com/django/django.git
complexity: 1 · token_counts: 35 · n_ast_errors: 0 · vocab_size: 10 · n_ast_nodes: 53
language: Python
documentation:
{ "docstring": "\n Return a ProjectState object representing the most recent state\n that the loaded migrations represent.\n\n See graph.make_state() for the meaning of \"nodes\" and \"at_end\".\n ", "language": "en", "n_whitespaces": 52, "n_words": 23, "vocab_size": 21 }
code:
```python
def project_state(self, nodes=None, at_end=True):
    return self.graph.make_state(
        nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps
    )
```
---
d_id: 39,282 · id: 162,744 · n_whitespaces: 375
path: research/neo_peq/legacy_frequency_response.py
n_words: 125 · n_identifiers: 26
random_cut: a verbatim prefix of the code field below, cut off at `# Use the gain value a`
commit_message: Added PEQ configs to CLI and function interfaces. Improved default value handling for PEQ parameters and added more predefined configs. Removed legacy PEQ optimization. Fixed readme write. Improved shelf filter initialization. Added plot method to PEQ. Notebook for comparing old and new optimizers. Bug fixes.
fun_name: center
commit_id: 9120cdffe618c6c2ff16fe6a311b6a1367efdbc8
repo: AutoEq · file_name: legacy_frequency_response.py
ast_levels: 15 · nloc: 22
url: https://github.com/jaakkopasanen/AutoEq.git
complexity: 7 · token_counts: 225 · n_ast_errors: 0 · vocab_size: 87 · n_ast_nodes: 353
language: Python
documentation:
{ "docstring": "Removed bias from frequency response.\n\n Args:\n frequency: Frequency which is set to 0 dB. If this is a list with two values then an average between the two\n frequencies is set to 0 dB.\n\n Returns:\n Gain shifted\n ", "language": "en", "n_whitespaces": 102, "n_words": 37, "vocab_size": 30 }
code:
```python
def center(self, frequency=1000):
    equal_energy_fr = self.__class__(name='equal_energy', frequency=self.frequency.copy(), raw=self.raw.copy())
    equal_energy_fr.interpolate()
    interpolator = InterpolatedUnivariateSpline(np.log10(equal_energy_fr.frequency), equal_energy_fr.raw, k=1)
    if type(frequency) in [list, np.ndarray] and len(frequency) > 1:
        # Use the average of the gain values between the given frequencies as the difference to be subtracted
        diff = np.mean(equal_energy_fr.raw[np.logical_and(
            equal_energy_fr.frequency >= frequency[0],
            equal_energy_fr.frequency <= frequency[1]
        )])
    else:
        if type(frequency) in [list, np.ndarray]:
            # List or array with only one element
            frequency = frequency[0]
        # Use the gain value at the given frequency as the difference to be subtracted
        diff = interpolator(np.log10(frequency))

    self.raw -= diff
    if len(self.smoothed):
        self.smoothed -= diff
    if len(self.error):
        self.error += diff
    if len(self.error_smoothed):
        self.error_smoothed += diff

    # Everything but raw, smoothed, errors and target is affected by centering, reset them
    self.reset(raw=False, smoothed=False, error=False, error_smoothed=False, target=False)

    return -diff
```
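A standalone sketch of the centering math on made-up data, assuming only numpy and scipy: the gain at 1 kHz is interpolated on a log-frequency axis and subtracted from the whole curve.

```python
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

frequency = np.logspace(1, 4.3, 200)           # 10 Hz .. ~20 kHz
raw = 3.0 + 2.0 * np.sin(np.log10(frequency))  # made-up response in dB
interpolator = InterpolatedUnivariateSpline(np.log10(frequency), raw, k=1)
diff = interpolator(np.log10(1000))            # gain at 1 kHz
centered = raw - diff                          # curve now passes 0 dB at 1 kHz
```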
---
d_id: 51,569 · id: 206,570 · n_whitespaces: 98
path: django/utils/cache.py
n_words: 51 · n_identifiers: 9
random_cut: a verbatim prefix of the code field below, cut off at `the active lan`
commit_message: Refs #33476 -- Reformatted code with Black.
fun_name: _i18n_cache_key_suffix
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00
repo: django · file_name: cache.py
ast_levels: 13 · nloc: 6
url: https://github.com/django/django.git
complexity: 3 · token_counts: 41 · n_ast_errors: 0 · vocab_size: 38 · n_ast_nodes: 76
language: Python
documentation:
{ "docstring": "If necessary, add the current locale or time zone to the cache key.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
code:
```python
def _i18n_cache_key_suffix(request, cache_key):
    if settings.USE_I18N:
        # first check if LocaleMiddleware or another middleware added
        # LANGUAGE_CODE to request, then fall back to the active language
        # which in turn can also fall back to settings.LANGUAGE_CODE
        cache_key += ".%s" % getattr(request, "LANGUAGE_CODE", get_language())
    if settings.USE_TZ:
        cache_key += ".%s" % get_current_timezone_name()
    return cache_key
```
---
ast_errors:
```python
arff_file = BytesIO( textwrap.dedent( """
```
d_id: 76,098 · id: 260,158 · n_whitespaces: 37
path: sklearn/datasets/tests/test_arff_parser.py
n_words: 9 · n_identifiers: 9
random_cut: a verbatim prefix of the code field below, cut off at `pd =`
commit_message: FIX make pandas and liac arff parser quoting behaviour closer (#23497) Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> Co-authored-by: Loïc Estève <loic.esteve@ymail.com>
fun_name: test_pandas_arff_parser_strip_double_quotes
commit_id: 8515b486810e844bc7f5f1a4fb2227405d46871e
repo: scikit-learn · file_name: test_arff_parser.py
ast_levels: 9 · nloc: 54
url: https://github.com/scikit-learn/scikit-learn.git
complexity: 1 · token_counts: 186 · n_ast_errors: 1 · vocab_size: 8 · n_ast_nodes: 39
language: Python
documentation:
{ "docstring": "Check that we properly strip double quotes from the data.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
code (truncated):
```python
def test_pandas_arff_parser_strip_double_quotes(parser_func):
    pd = pytest.importorskip("pandas")

    arff_file = BytesIO(
        textwrap.dedent(
```
---
d_id: 3,369 · id: 20,440 · n_whitespaces: 1,512
path: pipenv/patched/notpip/_vendor/pygments/lexer.py
n_words: 193 · n_identifiers: 30
random_cut: a verbatim prefix of the code field below, cut off at `elif state`
commit_message: check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
fun_name: get_tokens_unprocessed
commit_id: f3166e673fe8d40277b804d35d77dcdb760fc3b3
repo: pipenv · file_name: lexer.py
ast_levels: 24 · nloc: 56
url: https://github.com/pypa/pipenv.git
complexity: 20 · token_counts: 373 · n_ast_errors: 0 · vocab_size: 108 · n_ast_nodes: 609
language: Python
documentation:
{ "docstring": "\n Split ``text`` into (tokentype, text) pairs.\n If ``context`` is given, use this lexer context instead.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
code:
```python
def get_tokens_unprocessed(self, text=None, context=None):
    tokendefs = self._tokens
    if not context:
        ctx = LexerContext(text, 0)
        statetokens = tokendefs['root']
    else:
        ctx = context
        statetokens = tokendefs[ctx.stack[-1]]
        text = ctx.text
    while 1:
        for rexmatch, action, new_state in statetokens:
            m = rexmatch(text, ctx.pos, ctx.end)
            if m:
                if action is not None:
                    if type(action) is _TokenType:
                        yield ctx.pos, action, m.group()
                        ctx.pos = m.end()
                    else:
                        yield from action(self, m, ctx)
                        if not new_state:
                            # altered the state stack?
                            statetokens = tokendefs[ctx.stack[-1]]
                # CAUTION: callback must set ctx.pos!
                if new_state is not None:
                    # state transition
                    if isinstance(new_state, tuple):
                        for state in new_state:
                            if state == '#pop':
                                if len(ctx.stack) > 1:
                                    ctx.stack.pop()
                            elif state == '#push':
                                ctx.stack.append(ctx.stack[-1])
                            else:
                                ctx.stack.append(state)
                    elif isinstance(new_state, int):
                        # see RegexLexer for why this check is made
                        if abs(new_state) >= len(ctx.stack):
                            del ctx.state[1:]
                        else:
                            del ctx.stack[new_state:]
                    elif new_state == '#push':
                        ctx.stack.append(ctx.stack[-1])
                    else:
                        assert False, "wrong state def: %r" % new_state
                    statetokens = tokendefs[ctx.stack[-1]]
                break
        else:
            try:
                if ctx.pos >= ctx.end:
                    break
                if text[ctx.pos] == '\n':
                    # at EOL, reset state to "root"
                    ctx.stack = ['root']
                    statetokens = tokendefs['root']
                    yield ctx.pos, Text, '\n'
                    ctx.pos += 1
                    continue
                yield ctx.pos, Error, text[ctx.pos]
                ctx.pos += 1
            except IndexError:
                break
```
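For context, `get_tokens_unprocessed` is part of Pygments' public lexer API (this record is pipenv's vendored copy); a small driver using the stock `PythonLexer`:

```python
from pygments.lexers import PythonLexer

# Yields (index, tokentype, text) triples; index is the character offset
# into the source string, which the higher-level get_tokens() discards.
for index, tokentype, text in PythonLexer().get_tokens_unprocessed("x = 1\n"):
    print(index, tokentype, repr(text))
```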
---
d_id: 30,625 · id: 135,458 · n_whitespaces: 230
path: rllib/core/rl_module/torch/tests/test_torch_marl_module.py
n_words: 67 · n_identifiers: 16
random_cut: identical to the code field below
commit_message: [RLlib] MARLModule, RLModule PR 4/N (N=4) (#29449) Signed-off-by: Kourosh Hakhamaneshi <kourosh@anyscale.com>
fun_name: get_policy_data_from_agent_data
commit_id: 30058267363b8de16b809c987bb1f7d7befad24d
repo: ray · file_name: test_torch_marl_module.py
ast_levels: 16 · nloc: 21
url: https://github.com/ray-project/ray.git
complexity: 8 · token_counts: 182 · n_ast_errors: 0 · vocab_size: 47 · n_ast_nodes: 291
language: Python
documentation:
{ "docstring": "Utility function to get policy data from agent data and policy map function.\n\n It also keeps track of agent_id for each row so that we can retreive the agent\n level information after the forward pass.\n\n Returns:\n dict of module_id to module data\n ", "language": "en", "n_whitespaces": 61, "n_words": 42, "vocab_size": 35 }
code:
```python
def get_policy_data_from_agent_data(agent_data, policy_map_fn):
    policy_data = {}
    for agent_id, data in agent_data.items():
        policy_id = policy_map_fn(agent_id)
        policy_data.setdefault(policy_id, {})
        policy_data[policy_id].setdefault("agent_id", [])

        if data["obs"].ndim == 1:
            policy_data[policy_id]["agent_id"].append(agent_id)
        else:
            policy_data[policy_id]["agent_id"] += [agent_id] * len(data["obs"])

        for k, v in data.items():
            policy_data[policy_id].setdefault(k, [])
            if v.ndim == 1:
                v = v[None]
            policy_data[policy_id][k].append(v)

    for policy_id in policy_data:
        policy_data[policy_id] = {
            k: np.concatenate(v) if k != "agent_id" else v
            for k, v in policy_data[policy_id].items()
        }

    return policy_data
```
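A hedged usage sketch with toy numpy batches; both agents are mapped to one hypothetical shared policy, and the function above is assumed in scope:

```python
import numpy as np

agent_data = {
    "agent_0": {"obs": np.zeros(4), "reward": np.array([1.0])},
    "agent_1": {"obs": np.ones(4), "reward": np.array([0.5])},
}
out = get_policy_data_from_agent_data(agent_data, lambda agent_id: "shared_policy")
print(out["shared_policy"]["agent_id"])   # ['agent_0', 'agent_1']
print(out["shared_policy"]["obs"].shape)  # (2, 4) after the concatenate step
```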
---
d_id: 44,390 · id: 183,911 · n_whitespaces: 73
path: src/textual/widgets/_data_table.py
n_words: 23 · n_identifiers: 13
random_cut: a verbatim prefix of the code field below, cut off at a lone `s`
commit_message: docstring name change
fun_name: _update_dimensions
commit_id: c3dcc529b3aa0b168728b3315cfe973218d09685
repo: textual · file_name: _data_table.py
ast_levels: 12 · nloc: 7
url: https://github.com/Textualize/textual.git
complexity: 3 · token_counts: 50 · n_ast_errors: 0 · vocab_size: 22 · n_ast_nodes: 78
language: Python
documentation:
{ "docstring": "Called to recalculate the virtual (scrollable) size.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
code:
```python
def _update_dimensions(self) -> None:
    total_width = sum(column.width for column in self.columns)
    self.virtual_size = Size(
        total_width,
        len(self._y_offsets) + (self.header_height if self.show_header else 0),
    )
```
---
d_id: 50,666 · id: 204,168 · n_whitespaces: 49
path: django/contrib/messages/storage/base.py
n_words: 17 · n_identifiers: 7
random_cut: a verbatim prefix of the code field below, cut off at `"subclasses of BaseStorage mu`
commit_message: Refs #33476 -- Reformatted code with Black.
fun_name: _store
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00
repo: django · file_name: base.py
ast_levels: 8 · nloc: 4
url: https://github.com/django/django.git
complexity: 1 · token_counts: 21 · n_ast_errors: 0 · vocab_size: 17 · n_ast_nodes: 35
language: Python
documentation:
{ "docstring": "\n Store a list of messages and return a list of any messages which could\n not be stored.\n\n One type of object must be able to be stored, ``Message``.\n\n **This method must be implemented by a subclass.**\n ", "language": "en", "n_whitespaces": 72, "n_words": 36, "vocab_size": 26 }
code:
```python
def _store(self, messages, response, *args, **kwargs):
    raise NotImplementedError(
        "subclasses of BaseStorage must provide a _store() method"
    )
```
---
d_id: 72,812 · id: 249,309 · n_whitespaces: 86
path: tests/rest/admin/test_event_reports.py
n_words: 18 · n_identifiers: 13
random_cut: a verbatim prefix of the code field below, cut off at `msg=channel.jso`
commit_message: Use literals in place of `HTTPStatus` constants in tests (#13488) * Use literals in place of `HTTPStatus` constants in tests * newsfile * code style * code style
fun_name: test_from_is_negative
commit_id: 2281427175e4c93a30c39607fb4ac23c2a1f399f
repo: synapse · file_name: test_event_reports.py
ast_levels: 10 · nloc: 11
url: https://github.com/matrix-org/synapse.git
complexity: 1 · token_counts: 60 · n_ast_errors: 0 · vocab_size: 18 · n_ast_nodes: 97
language: Python
documentation:
{ "docstring": "\n Testing that a negative from parameter returns a 400\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
code:
```python
def test_from_is_negative(self) -> None:
    channel = self.make_request(
        "GET",
        self.url + "?from=-5",
        access_token=self.admin_user_tok,
    )

    self.assertEqual(400, channel.code, msg=channel.json_body)
    self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
```
---
d_id: 45,340 · id: 186,112 · n_whitespaces: 8
path: tests/test_binding_inheritance.py
n_words: 5 · n_identifiers: 1
random_cut: identical to the code field below
commit_message: Add test for focused widget, no inherit, empty BINDINGS Testing the overlap between #1343 and #1351.
fun_name: test_focused_child_widget_no_inherit_empty_bindings_with_movement_bindings_on_screen
commit_id: e8c87ced33ccac893121e3cc0fb1097b0d8da035
repo: textual · file_name: test_binding_inheritance.py
ast_levels: 6 · nloc: 5
url: https://github.com/Textualize/textual.git
complexity: 2 · token_counts: 53 · n_ast_errors: 0 · vocab_size: 5 · n_ast_nodes: 16
language: Python
documentation:
{ "docstring": "A focused child widget, that doesn't inherit bindings and sets BINDINGS empty, with movement bindings in the screen, should trigger screen actions.", "language": "en", "n_whitespaces": 21, "n_words": 22, "vocab_size": 21 }
code (truncated):
```python
async def test_focused_child_widget_no_inherit_empty_bindings_with_movement_bindings_on_screen() -> None:
```
---
d_id: 55,486 · id: 218,798 · n_whitespaces: 1,039
path: python3.10.4/Lib/lib2to3/pgen2/parse.py
n_words: 220 · n_identifiers: 28
random_cut: identical to the code field below
commit_message: add python 3.10.4 for windows
fun_name: addtoken
commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526
repo: XX-Net · file_name: parse.py
ast_levels: 19 · nloc: 33
url: https://github.com/XX-net/XX-Net.git
complexity: 10 · token_counts: 232 · n_ast_errors: 0 · vocab_size: 123 · n_ast_nodes: 365
language: Python
documentation:
{ "docstring": "Add a token; return True iff this is the end of the program.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
code:
```python
def addtoken(self, type, value, context):
    # Map from token to label
    ilabel = self.classify(type, value, context)
    # Loop until the token is shifted; may raise exceptions
    while True:
        dfa, state, node = self.stack[-1]
        states, first = dfa
        arcs = states[state]
        # Look for a state with this label
        for i, newstate in arcs:
            t, v = self.grammar.labels[i]
            if ilabel == i:
                # Look it up in the list of labels
                assert t < 256
                # Shift a token; we're done with it
                self.shift(type, value, newstate, context)
                # Pop while we are in an accept-only state
                state = newstate
                while states[state] == [(0, state)]:
                    self.pop()
                    if not self.stack:
                        # Done parsing!
                        return True
                    dfa, state, node = self.stack[-1]
                    states, first = dfa
                # Done with this token
                return False
            elif t >= 256:
                # See if it's a symbol and if we're in its first set
                itsdfa = self.grammar.dfas[t]
                itsstates, itsfirst = itsdfa
                if ilabel in itsfirst:
                    # Push a symbol
                    self.push(t, self.grammar.dfas[t], newstate, context)
                    break  # To continue the outer while loop
        else:
            if (0, state) in arcs:
                # An accepting state, pop it and try something else
                self.pop()
                if not self.stack:
                    # Done parsing, but another token is input
                    raise ParseError("too much input", type, value, context)
            else:
                # No success finding a transition
                raise ParseError("bad input", type, value, context)
```
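A sketch of how a driver feeds this method, modeled loosely on `lib2to3.pgen2.driver` (simplified: the real driver also threads token prefixes and line numbers through `context`):

```python
# Hypothetical driver loop; `grammar`, `tokens`, and `prefix` come from
# pgen2's grammar-loading and tokenizing machinery, not reproduced here.
p = Parser(grammar)
p.setup()
for type_, value, start, end, line in tokens:
    if p.addtoken(type_, value, (prefix, start)):
        break  # addtoken() returned True: the start symbol is complete
tree = p.rootnode  # set by the final pop()
```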
---
d_id: 8,055 · id: 43,771 · n_whitespaces: 464
path: airflow/settings.py
n_words: 115 · n_identifiers: 28
random_cut: a verbatim prefix of the code field below, cut off at `due to a trans`
commit_message (verbatim, including its embedded benchmark output):
Speed up creation of DagRun for large DAGs (5k+ tasks) by 25-130% (#20722) * Speed up creation of DagRun for large DAGs (5k+ tasks) by 15-40% This uses the "bulk" operation API of SQLAlchemy to get a big speed up. Due to the `task_instance_mutation_hook` we still need to keep actual TaskInstance objects around. For postgresql we have enabled to "batch operation helpers"[1] which makes it even faster. The default page sizes are chosen somewhat randomly based on the SQLA docs. To make these options configurable I have added (and used here and in KubeConfig) a new `getjson` option to AirflowConfigParser class. Postgresql is over 77% faster with bulk_save_objects: Before: ``` number_of_tis=1 mean=0.004397215199423954 per=0.004397215199423954 times=[0.009390181003254838, 0.002814065999700688, 0.00284132499655243, 0.0036120269942330196, 0.0033284770033787936] number_of_tis=10 mean=0.008078816600027494 per=0.0008078816600027494 times=[0.011014281000825576, 0.008476420000079088, 0.00741832799394615, 0.006857775995740667, 0.006627278009545989] number_of_tis=50 mean=0.01927847799670417 per=0.00038556955993408336 times=[0.02556803499464877, 0.01935569499619305, 0.01662322599440813, 0.01840184700267855, 0.01644358699559234] number_of_tis=100 mean=0.03301511880126782 per=0.00033015118801267817 times=[0.04117956099798903, 0.030890661000739783, 0.03007458901265636, 0.03125198099587578, 0.03167880199907813] number_of_tis=500 mean=0.15320950179593637 per=0.0003064190035918727 times=[0.20054609200451523, 0.14052859699586406, 0.14509809199080337, 0.1365471329918364, 0.1433275949966628] number_of_tis=1000 mean=0.2929377429973101 per=0.0002929377429973101 times=[0.3517978919990128, 0.2807794280088274, 0.2806490379880415, 0.27710555399244186, 0.27435680299822707] number_of_tis=3000 mean=0.9935687056015012 per=0.00033118956853383374 times=[1.2047388390055858, 0.8248025969951414, 0.8685875020019012, 0.9017027500085533, 1.1680118399963249] number_of_tis=5000 mean=1.5349355740036117 per=0.00030698711480072236 times=[1.8663743910001358, 1.5182018500054255, 1.5446484510030132, 1.3932801040064078, 1.3521730740030762] number_of_tis=10000 mean=3.7448632712010292 per=0.0003744863271201029 times=[4.135914924001554, 3.4411147559876554, 3.526543836007477, 3.7195197630062466, 3.9012230770022143] number_of_tis=15000 mean=6.3099766838044165 per=0.00042066511225362775 times=[6.552250057997298, 6.1369703890086384, 6.8749958210100885, 6.067943914007628, 5.917723236998427] number_of_tis=20000 mean=8.317583500797628 per=0.00041587917503988143 times=[8.720249108009739, 8.0188543760014, 8.328030352990027, 8.398350054994808, 8.122433611992165] ``` When using bulk_save_objects: ``` number_of_tis=20000 mean=4.678154367001843 per=0.00023390771835009216 times=[4.465847548010061, 4.571855771995615, 4.749505186002352, 4.724330568002188, 4.8792327609990025] ``` MySQL is only 10-15% faster (and a lot noisier) Before: ``` number_of_tis=1 mean=0.006164804595755413 per=0.006164804595755413 times=[0.013516580002033152, 0.00427598599344492, 0.004508020996581763, 0.004067091998877004, 0.004456343987840228] number_of_tis=10 mean=0.007822793803643435 per=0.0007822793803643434 times=[0.0081135170039488, 0.00719467100861948, 0.009007985994685441, 0.00758794900320936, 0.007209846007754095] number_of_tis=50 mean=0.020377356800599954 per=0.00040754713601199905 times=[0.02612382399092894, 0.018950315003166907, 0.019109474000288174, 0.018008680999628268, 0.019694490008987486] number_of_tis=100 mean=0.040682651600218375 per=0.00040682651600218374 
times=[0.05449078499805182, 0.037430580996442586, 0.039291110006161034, 0.03625023599306587, 0.035950546007370576] number_of_tis=500 mean=0.18646696420037187 per=0.00037293392840074375 times=[0.24278165798750706, 0.17090376401029062, 0.1837275660072919, 0.16893767600413412, 0.1659841569926357] number_of_tis=1000 mean=0.5903461098030676 per=0.0005903461098030675 times=[0.6001852740009781, 0.5642872750031529, 0.686630773008801, 0.5578094649972627, 0.5428177620051429] number_of_tis=3000 mean=1.9076304554007948 per=0.0006358768184669316 times=[2.042052763994434, 2.1137778090051142, 1.7461599689995637, 1.7260139089921722, 1.9101478260126896] number_of_tis=5000 mean=2.9185905692051164 per=0.0005837181138410233 times=[2.9221124830073677, 3.2889883980096783, 2.7569778940087417, 2.973596281008213, 2.651277789991582] number_of_tis=10000 mean=8.880191986600403 per=0.0008880191986600403 times=[7.3548113360011484, 9.13715232499817, 9.568511486999341, 8.80206210000324, 9.538422685000114] number_of_tis=15000 mean=15.426499317999696 per=0.0010284332878666464 times=[14.944712879005237, 15.38737604500784, 15.409629273999599, 15.852925243991194, 15.53785314799461] number_of_tis=20000 mean=20.579332908798825 per=0.0010289666454399414 times=[20.362008597003296, 19.878823954990366, 20.73281196100288, 20.837948996995692, 21.085071034001885] ``` After: ``` number_of_tis=20000 mean=18.36637533060275 per=0.0009183187665301375 times=[17.728908119010157, 18.62269214099797, 18.936747477011522, 17.74613195299753, 18.797396962996572] ``` [1]: https://docs.sqlalchemy.org/en/13/dialects/postgresql.html#psycopg2-batch-mode * Use bulk_insert_mappings for even more speed where possible. It gives us an extra speed up over bulk_save_objects, but we can't use it when the task_instance_mutation_hook does anything, as that hook needs an actual object. So _when_ we know that hook won't do anything we switch in to insert_mappings mode. 
New speeds (vs baseline, not vs bulk_save_objects) when using bulk_insert_mappings PostgreSQL now 130% faster: ``` number_of_tis=1 mean=0.028053103599813767 per=0.028053103599813767 times=[0.03762496300623752, 0.02637488600157667, 0.025065611000172794, 0.024561002996051684, 0.026639054995030165] number_of_tis=10 mean=0.02647183560184203 per=0.002647183560184203 times=[0.02698062499985099, 0.026417658998980187, 0.027347976007149555, 0.025797458001761697, 0.025815460001467727] number_of_tis=50 mean=0.03149963079486042 per=0.0006299926158972085 times=[0.03810671299288515, 0.03055680700344965, 0.029733988994848914, 0.03016914198815357, 0.02893150299496483] number_of_tis=100 mean=0.033998635396710594 per=0.0003399863539671059 times=[0.0351028829900315, 0.03299884400621522, 0.03358584298985079, 0.03295094799250364, 0.03535465900495183] number_of_tis=500 mean=0.07903424859978259 per=0.00015806849719956516 times=[0.08279920800123364, 0.08588568199775182, 0.07312070899934042, 0.07360191999759991, 0.07976372400298715] number_of_tis=1000 mean=0.12571056479937398 per=0.00012571056479937398 times=[0.12573593499837443, 0.12141938100103289, 0.12616568499652203, 0.12907471299695317, 0.12615711000398733] number_of_tis=3000 mean=0.36025245799683037 per=0.00012008415266561012 times=[0.36071603700111154, 0.3470657339930767, 0.3373015969991684, 0.3337128989951452, 0.42246602299564984] number_of_tis=5000 mean=0.6916533229988999 per=0.00013833066459977998 times=[0.9647149289958179, 0.6451378140045563, 0.5970188640058041, 0.5849326960014878, 0.6664623119868338] number_of_tis=10000 mean=2.071472014003666 per=0.00020714720140036663 times=[2.957865878008306, 1.9388906149979448, 1.766649461002089, 1.8647991580073722, 1.8291549580026185] number_of_tis=15000 mean=2.866650845797267 per=0.00019111005638648446 times=[3.3783503199956613, 2.657773957995232, 2.707275656008278, 2.7875704979960574, 2.802283796991105] number_of_tis=20000 mean=3.5886989389982773 per=0.00017943494694991387 times=[3.969436354993377, 3.436962780993781, 3.9078941010084236, 3.6387251569976797, 2.9904763009981252] ``` MySQL is (only) 27% faster: ``` number_of_tis=1 mean=0.035956257799989545 per=0.035956257799989545 times=[0.03932315899874084, 0.03545605999534018, 0.03535486999317072, 0.034727805003058165, 0.03491939500963781] number_of_tis=10 mean=0.036957260797498746 per=0.0036957260797498745 times=[0.040442515004542656, 0.0379129799985094, 0.03494819799379911, 0.03562593398964964, 0.03585667700099293] number_of_tis=50 mean=0.04745422120031435 per=0.0009490844240062871 times=[0.06965546800347511, 0.04221734800375998, 0.04038520700123627, 0.040363031992455944, 0.04465005100064445] number_of_tis=100 mean=0.0528092162014218 per=0.000528092162014218 times=[0.06113427500531543, 0.04883724599494599, 0.05276876600692049, 0.047688748003565706, 0.05361704599636141] number_of_tis=500 mean=0.16223246100416872 per=0.0003244649220083374 times=[0.24469116200634744, 0.1407806619972689, 0.14792052800476085, 0.14703868801007047, 0.13073126500239596] number_of_tis=1000 mean=0.285728433605982 per=0.00028572843360598197 times=[0.3230128890136257, 0.27035739900020417, 0.3003890450054314, 0.2638379510026425, 0.2710448840080062] number_of_tis=3000 mean=1.1824120475997915 per=0.0003941373491999305 times=[1.3103130240051541, 1.286688863998279, 1.1455156929878285, 1.1072918410063721, 1.062250816001324] number_of_tis=5000 mean=1.9416745471942705 per=0.0003883349094388541 times=[2.3746965279860888, 1.9103765429899795, 2.0542518720030785, 1.7706374429981224, 1.598410349994083] 
number_of_tis=10000 mean=5.059874459402636 per=0.0005059874459402636 times=[5.431018351999228, 5.262124675995437, 5.174487816999317, 4.423381198008428, 5.008360254010768] number_of_tis=15000 mean=9.717965700797503 per=0.0006478643800531668 times=[7.884617075993447, 9.466949063993525, 10.005758297003922, 10.105231182998978, 11.127272883997648] number_of_tis=20000 mean=16.2008618004038 per=0.00081004309002019 times=[14.645835625007749, 16.304637463006657, 16.255490412993822, 16.830263861003914, 16.968081640006858] ```
fun_name: import_local_settings
commit_id: f2039b4c9e15b514661d4facbd710791fe0a2ef4
repo: airflow · file_name: settings.py
ast_levels: 18 · nloc: 34
url: https://github.com/apache/airflow.git
complexity: 11 · token_counts: 191 · n_ast_errors: 0 · vocab_size: 83 · n_ast_nodes: 336
language: Python
documentation:
{ "docstring": "Import airflow_local_settings.py files to allow overriding any configs in settings.py file", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
code:
```python
def import_local_settings():
    try:
        import airflow_local_settings

        if hasattr(airflow_local_settings, "__all__"):
            for i in airflow_local_settings.__all__:
                globals()[i] = getattr(airflow_local_settings, i)
        else:
            for k, v in airflow_local_settings.__dict__.items():
                if not k.startswith("__"):
                    globals()[k] = v

        # TODO: Remove once deprecated
        if "policy" in globals() and "task_policy" not in globals():
            warnings.warn(
                "Using `policy` in airflow_local_settings.py is deprecated. "
                "Please rename your `policy` to `task_policy`.",
                DeprecationWarning,
                stacklevel=2,
            )
            globals()["task_policy"] = globals()["policy"]
            del globals()["policy"]

        if not hasattr(task_instance_mutation_hook, 'is_noop'):
            task_instance_mutation_hook.is_noop = False

        log.info("Loaded airflow_local_settings from %s .", airflow_local_settings.__file__)
    except ModuleNotFoundError as e:
        if e.name == "airflow_local_settings":
            log.debug("No airflow_local_settings to import.", exc_info=True)
        else:
            log.critical(
                "Failed to import airflow_local_settings due to a transitive module not found error.",
                exc_info=True,
            )
            raise
    except ImportError:
        log.critical("Failed to import airflow_local_settings.", exc_info=True)
        raise
```
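For context, a hedged example of the kind of `airflow_local_settings.py` this function picks up; `task_policy` and `task_instance_mutation_hook` are the hook names referenced in the code above, while the policies themselves are made up:

```python
# airflow_local_settings.py -- found on Airflow's PYTHONPATH (illustrative).

def task_policy(task):
    # Hypothetical policy: enforce a default owner on every task.
    if not task.owner or task.owner == "airflow":
        task.owner = "data-platform"


def task_instance_mutation_hook(task_instance):
    # Hypothetical policy: route reruns to a dedicated queue.
    if task_instance.try_number > 1:
        task_instance.queue = "retry_queue"
```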
---
d_id: 47,444 · id: 195,857 · n_whitespaces: 190
path: sympy/functions/elementary/complexes.py
n_words: 75 · n_identifiers: 13
random_cut: a verbatim prefix of the code field below, cut off at `return res.subs({exp_pola`
commit_message: Improved documentation formatting
fun_name: unpolarify
commit_id: cda8dfe6f45dc5ed394c2f5cda706cd6c729f713
repo: sympy · file_name: complexes.py
ast_levels: 11 · nloc: 19
url: https://github.com/sympy/sympy.git
complexity: 7 · token_counts: 116 · n_ast_errors: 0 · vocab_size: 46 · n_ast_nodes: 184
language: Python
documentation:
{ "docstring": "\n If `p` denotes the projection from the Riemann surface of the logarithm to\n the complex line, return a simplified version `eq'` of `eq` such that\n `p(eq') = p(eq)`.\n Also apply the substitution subs in the end. (This is a convenience, since\n ``unpolarify``, in a certain sense, undoes :func:`polarify`.)\n\n Examples\n ========\n\n >>> from sympy import unpolarify, polar_lift, sin, I\n >>> unpolarify(polar_lift(I + 2))\n 2 + I\n >>> unpolarify(sin(polar_lift(I + 7)))\n sin(7 + I)\n ", "language": "en", "n_whitespaces": 112, "n_words": 72, "vocab_size": 56 }
code:
```python
def unpolarify(eq, subs=None, exponents_only=False):
    if isinstance(eq, bool):
        return eq

    eq = sympify(eq)
    if subs is not None:
        return unpolarify(eq.subs(subs))
    changed = True
    pause = False
    if exponents_only:
        pause = True
    while changed:
        changed = False
        res = _unpolarify(eq, exponents_only, pause)
        if res != eq:
            changed = True
            eq = res
        if isinstance(res, bool):
            return res
    # Finally, replacing Exp(0) by 1 is always correct.
    # So is polar_lift(0) -> 0.
    return res.subs({exp_polar(0): 1, polar_lift(0): 0})
```
---
d_id: 21,170 · id: 101,766 · n_whitespaces: 34
path: plugins/extract/_base.py
n_words: 9 · n_identifiers: 4
random_cut: a verbatim prefix of the code field below, cut off at `for thread in self._th`
commit_message: Extract: Typing and standardization
fun_name: check_and_raise_error
commit_id: 765e385177bda9b9e99951492ef33b34b4e4773e
repo: faceswap · file_name: _base.py
ast_levels: 9 · nloc: 7
url: https://github.com/deepfakes/faceswap.git
complexity: 2 · token_counts: 20 · n_ast_errors: 0 · vocab_size: 9 · n_ast_nodes: 35
language: Python
documentation:
{ "docstring": " Check all threads for errors\n\n Exposed for :mod:`~plugins.extract.pipeline` to check plugin's threads for errors\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 10 }
code:
```python
def check_and_raise_error(self) -> None:
    for thread in self._threads:
        thread.check_and_raise_error()
```
---
d_id: 92,777 · id: 293,721 · n_whitespaces: 54
path: homeassistant/components/recorder/pool.py
n_words: 15 · n_identifiers: 9
random_cut: identical to the code field below
commit_message: Use a dedicated executor pool for database operations (#68105) Co-authored-by: Erik Montnemery <erik@montnemery.com> Co-authored-by: Franck Nijhof <git@frenck.dev>
fun_name: recorder_or_dbworker
commit_id: bc862e97ed68cce8c437327651f85892787e755e
repo: core · file_name: pool.py
ast_levels: 10 · nloc: 6
url: https://github.com/home-assistant/core.git
complexity: 2 · token_counts: 31 · n_ast_errors: 0 · vocab_size: 14 · n_ast_nodes: 55
language: Python
documentation:
{ "docstring": "Check if the thread is a recorder or dbworker thread.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def recorder_or_dbworker(self) -> bool: thread_name = threading.current_thread().name return bool( thread_name == "Recorder" or thread_name.startswith(DB_WORKER_PREFIX) )
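For illustration, a self-contained sketch of the same thread-name idiom; the DB_WORKER_PREFIX value here is an assumption made only for the sketch:
import threading

DB_WORKER_PREFIX = "DbWorker"  # assumed value, for the sketch only

def recorder_or_dbworker() -> bool:
    # Same check as above: match the recorder thread or any DB worker thread.
    name = threading.current_thread().name
    return bool(name == "Recorder" or name.startswith(DB_WORKER_PREFIX))

t = threading.Thread(target=lambda: print(recorder_or_dbworker()), name="DbWorker-1")
t.start(); t.join()  # prints True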
120,843
335,973
112
scripts/convert_ldm_original_checkpoint_to_diffusers.py
44
9
def renew_resnet_paths(old_list, n_shave_prefix_segments=0): mapping = [] for old_item in old_list: new_item = old_item.replace('in_layers.0', 'norm1')
LDM conversion script (#92) Conversion script Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
renew_resnet_paths
87060e6a9c7754b648e621175b4d73161e82906e
diffusers
convert_ldm_original_checkpoint_to_diffusers.py
12
12
https://github.com/huggingface/diffusers.git
2
105
0
30
189
Python
{ "docstring": "\n Updates paths inside resnets to the new naming scheme (local renaming)\n ", "language": "en", "n_whitespaces": 18, "n_words": 11, "vocab_size": 11 }
def renew_resnet_paths(old_list, n_shave_prefix_segments=0): mapping = [] for old_item in old_list: new_item = old_item.replace('in_layers.0', 'norm1') new_item = new_item.replace('in_layers.2', 'conv1') new_item = new_item.replace('out_layers.0', 'norm2') new_item = new_item.replace('out_layers.3', 'conv2') new_item = new_item.replace('emb_layers.1', 'time_emb_proj') new_item = new_item.replace('skip_connection', 'conv_shortcut') new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({'old': old_item, 'new': new_item}) return mapping
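A usage sketch with hypothetical checkpoint keys, assuming shave_segments(path, n_shave_prefix_segments=0) returns the path unchanged:
old = ["input_blocks.1.0.in_layers.0.weight", "input_blocks.1.0.out_layers.3.bias"]
renew_resnet_paths(old)
# [{'old': 'input_blocks.1.0.in_layers.0.weight', 'new': 'input_blocks.1.0.norm1.weight'},
#  {'old': 'input_blocks.1.0.out_layers.3.bias', 'new': 'input_blocks.1.0.conv2.bias'}]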
82,808
278,998
113
keras/utils/metrics_utils.py
42
13
def _assert_splits_match(nested_splits_lists): error_msg = ( "Inputs must have identical ragged splits. " f"Input received: {nested_splits_lists}" ) for splits_list in nested_splits_lists: if len(splits_list) != len(nested_splits_lists[0]): rais
Remove pylint comments. PiperOrigin-RevId: 452353044
_assert_splits_match
3613c3defc39c236fb1592c4f7ba1a9cc887343a
keras
metrics_utils.py
11
13
https://github.com/keras-team/keras.git
5
78
0
37
124
Python
{ "docstring": "Checks that the given splits lists are identical.\n\n Performs static tests to ensure that the given splits lists are identical,\n and returns a list of control dependency op tensors that check that they are\n fully identical.\n\n Args:\n nested_splits_lists: A list of nested_splits_lists, where each split_list\n is a list of `splits` tensors from a `RaggedTensor`, ordered from\n outermost ragged dimension to innermost ragged dimension.\n\n Returns:\n A list of control dependency op tensors.\n Raises:\n ValueError: If the splits are not identical.\n ", "language": "en", "n_whitespaces": 129, "n_words": 79, "vocab_size": 49 }
def _assert_splits_match(nested_splits_lists): error_msg = ( "Inputs must have identical ragged splits. " f"Input received: {nested_splits_lists}" ) for splits_list in nested_splits_lists: if len(splits_list) != len(nested_splits_lists[0]): raise ValueError(error_msg) return [ tf.debugging.assert_equal(s1, s2, message=error_msg) for splits_list in nested_splits_lists[1:] for (s1, s2) in zip(nested_splits_lists[0], splits_list) ]
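A minimal sketch of the expected call pattern, assuming two ragged tensors with identical row lengths so the checks pass:
import tensorflow as tf

rt1 = tf.ragged.constant([[1, 2], [3]])
rt2 = tf.ragged.constant([[4, 5], [6]])  # same row splits as rt1
checks = _assert_splits_match([rt1.nested_row_splits, rt2.nested_row_splits])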
21,280
101,898
137
lib/gui/display_command.py
38
16
def _iteration_limit_callback(self, *args) -> None: try: limit = self.vars["display_iterations"].get() except tk.TclError:
Typing - lib.gui.display_command
_iteration_limit_callback
dab823a3eb7a5257cb1e0818ee10ed234d3de97f
faceswap
display_command.py
12
11
https://github.com/deepfakes/faceswap.git
3
62
0
36
105
Python
{ "docstring": " Limit the amount of data displayed in the live graph on a iteration slider\n variable change. ", "language": "en", "n_whitespaces": 24, "n_words": 16, "vocab_size": 15 }
def _iteration_limit_callback(self, *args) -> None: try: limit = self.vars["display_iterations"].get() except tk.TclError: # Don't update when there is no value in the variable return logger.debug("Updating graph iteration limit: (new_value: %s, args: %s)", limit, args) for graph in self.subnotebook.children.values(): graph.calcs.set_iterations_limit(limit)
9,138
47,512
290
tests/jobs/test_scheduler_job.py
88
43
def test_queued_dagruns_stops_creating_when_max_active_is_reached(self, dag_maker): with dag_maker(max_active_runs=10) as dag: EmptyOperator(task_id='mytask') session = settings.Session() self.scheduler_job = SchedulerJob(subdir=os.devnull) self.scheduler_job.executor = MockExecutor() self.scheduler_job.processor_agent = mock.MagicMock() self.scheduler_job.dagbag = dag_maker.dagbag session = settings.Session() orm_dag = session.query(DagModel).get(dag.dag_id) assert orm_dag is no
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
test_queued_dagruns_stops_creating_when_max_active_is_reached
49e336ae0302b386a2f47269a6d13988382d975f
airflow
test_scheduler_job.py
13
26
https://github.com/apache/airflow.git
4
278
0
48
448
Python
{ "docstring": "This tests that queued dagruns stops creating once max_active_runs is reached", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def test_queued_dagruns_stops_creating_when_max_active_is_reached(self, dag_maker): with dag_maker(max_active_runs=10) as dag: EmptyOperator(task_id='mytask') session = settings.Session() self.scheduler_job = SchedulerJob(subdir=os.devnull) self.scheduler_job.executor = MockExecutor() self.scheduler_job.processor_agent = mock.MagicMock() self.scheduler_job.dagbag = dag_maker.dagbag session = settings.Session() orm_dag = session.query(DagModel).get(dag.dag_id) assert orm_dag is not None for _ in range(20): self.scheduler_job._create_dag_runs([orm_dag], session) drs = session.query(DagRun).all() assert len(drs) == 10 for dr in drs: dr.state = State.RUNNING session.merge(dr) session.commit() assert session.query(DagRun.state).filter(DagRun.state == State.RUNNING).count() == 10 for _ in range(20): self.scheduler_job._create_dag_runs([orm_dag], session) assert session.query(DagRun).count() == 10 assert session.query(DagRun.state).filter(DagRun.state == State.RUNNING).count() == 10 assert session.query(DagRun.state).filter(DagRun.state == State.QUEUED).count() == 0 assert orm_dag.next_dagrun_create_after is None
44,334
183,781
32
tests/test_xterm_parser.py
17
7
def test_escape_sequence_resulting_in_multiple_keypresses(parser): events = list(parser.feed("\x1b[2;4~")) assert len(events) == 2 assert events[0].key == "escape"
Backtracking unknown escape sequences, various tests for XTermParser
test_escape_sequence_resulting_in_multiple_keypresses
bfb962bacf274373e5706090cd854b6aa0857270
textual
test_xterm_parser.py
11
5
https://github.com/Textualize/textual.git
1
42
0
13
75
Python
{ "docstring": "Some sequences are interpreted as more than 1 keypress", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_escape_sequence_resulting_in_multiple_keypresses(parser): events = list(parser.feed("\x1b[2;4~")) assert len(events) == 2 assert events[0].key == "escape" assert events[1].key == "shift+insert"
51,057
205,271
516
django/db/migrations/autodetector.py
85
33
def generate_altered_options(self): models_to_check = self.kept_model_keys.union( self.kept_proxy_keys, self.kept_unmanaged_keys, # unmanaged converted to managed self.old_unmanaged_keys & self.new_model_keys, # managed converted to unmanaged self.old_model_keys & self.new_unmanaged_keys, ) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_options = { key: value for key, value in old_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } new_options = { key: value for key, value in new_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } if old_options != new_options: self.add_operation( app_label, operations.AlterModelOptions( name=model_name, options=new_options, ),
Refs #33476 -- Reformatted code with Black.
generate_altered_options
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
autodetector.py
14
31
https://github.com/django/django.git
7
165
0
52
248
Python
{ "docstring": "\n Work out if any non-schema-affecting options have changed and make an\n operation to represent them in state changes (in case Python code in\n migrations needs them).\n ", "language": "en", "n_whitespaces": 55, "n_words": 26, "vocab_size": 25 }
def generate_altered_options(self): models_to_check = self.kept_model_keys.union( self.kept_proxy_keys, self.kept_unmanaged_keys, # unmanaged converted to managed self.old_unmanaged_keys & self.new_model_keys, # managed converted to unmanaged self.old_model_keys & self.new_unmanaged_keys, ) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_options = { key: value for key, value in old_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } new_options = { key: value for key, value in new_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } if old_options != new_options: self.add_operation( app_label, operations.AlterModelOptions( name=model_name, options=new_options, ), )
50,315
203,341
250
django/contrib/admin/checks.py
42
16
def _check_readonly_fields(self, obj): if obj.readonly_fields == (): return [] elif not isinstance(obj.readonly_fields, (list, tuple)): return must_be( "a list o
Refs #33476 -- Reformatted code with Black.
_check_readonly_fields
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
checks.py
16
16
https://github.com/django/django.git
4
85
0
37
137
Python
{ "docstring": "Check that readonly_fields refers to proper attribute or field.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def _check_readonly_fields(self, obj): if obj.readonly_fields == (): return [] elif not isinstance(obj.readonly_fields, (list, tuple)): return must_be( "a list or tuple", option="readonly_fields", obj=obj, id="admin.E034" ) else: return list( chain.from_iterable( self._check_readonly_fields_item( obj, field_name, "readonly_fields[%d]" % index ) for index, field_name in enumerate(obj.readonly_fields) ) )
@register.simple_tag(takes_context=True)
15,652
71,268
139
wagtail/admin/templatetags/wagtailadmin_tags.py
61
16
def querystring(context, **kwargs): request = context["request"] querydict = request.GET.copy() # Can't do querydict.update(kwargs), because QueryDict.update() appends to # the list of values, instead of replacing the values. for key, value in kwar
Reformat with black
querystring
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
wagtailadmin_tags.py
13
9
https://github.com/wagtail/wagtail.git
3
67
1
46
132
Python
{ "docstring": "\n Print out the current querystring. Any keyword arguments to this template\n tag will be added to the querystring before it is printed out.\n\n <a href=\"/page/{% querystring key='value' %}\">\n\n Will result in something like:\n\n <a href=\"/page/?foo=bar&key=value\">\n ", "language": "en", "n_whitespaces": 62, "n_words": 35, "vocab_size": 31 }
def querystring(context, **kwargs): request = context["request"] querydict = request.GET.copy() # Can't do querydict.update(kwargs), because QueryDict.update() appends to # the list of values, instead of replacing the values. for key, value in kwargs.items(): if value is None: # Remove the key if the value is None querydict.pop(key, None) else: # Set the key otherwise querydict[key] = str(value) return "?" + querydict.urlencode() @register.simple_tag(takes_context=True)
38,133
159,111
234
rasa/graph_components/validators/finetuning_validator.py
66
26
def _get_fingerprint_of_schema_without_irrelevant_keys(self) -> Text: graph_schema = self._execution_context.graph_schema schema_as_dict = graph_schema.as_dict() for node_name, node_dict in schema_as_dict["nodes"].items(): config_copy = copy.deepcopy(node_dict["config"]) config_copy.pop(EPOCHS, None) # ignore default values since they're filled in anyway later and can # end up in configs (or not) in mysterious ways defaults = graph_schema.nodes[node_name].uses.get_default_config() for key, default_value in defaults.items(): if key in config_copy and config_copy[key] == default_value: config_copy.pop(key) node_dict["config"] = config_copy node_dict.pop("eager") node_dict.pop("constructor_name") return rasa.shared.utils.io.deep_container_fingerprint(schema_as_dict)
Update dependencies in 3.0 to align with rasa-sdk (#10667) * align dependencies * use black 21.7b0 * apply black and docstring reformatting * add changelog
_get_fingerprint_of_schema_without_irrelevant_keys
36eb9c9a5fcca2160e54a6cde5076c93db5bd70b
rasa
finetuning_validator.py
13
23
https://github.com/RasaHQ/rasa.git
5
129
0
52
217
Python
{ "docstring": "Returns a fingerprint of the given schema with certain items removed.\n\n These items include specifications that do not influence actual training\n results such as \"eager\" mode. The only configuration (in your config) that is\n allowed to change is the number of `epochs`.\n\n Returns:\n fingerprint\n ", "language": "en", "n_whitespaces": 90, "n_words": 44, "vocab_size": 38 }
def _get_fingerprint_of_schema_without_irrelevant_keys(self) -> Text: graph_schema = self._execution_context.graph_schema schema_as_dict = graph_schema.as_dict() for node_name, node_dict in schema_as_dict["nodes"].items(): config_copy = copy.deepcopy(node_dict["config"]) config_copy.pop(EPOCHS, None) # ignore default values since they're filled in anyway later and can # end up in configs (or not) in mysterious ways defaults = graph_schema.nodes[node_name].uses.get_default_config() for key, default_value in defaults.items(): if key in config_copy and config_copy[key] == default_value: config_copy.pop(key) node_dict["config"] = config_copy node_dict.pop("eager") node_dict.pop("constructor_name") return rasa.shared.utils.io.deep_container_fingerprint(schema_as_dict)
77,454
263,829
18
PyInstaller/utils/hooks/gi.py
9
6
def get_gi_typelibs(module, version): module_info = GiModuleInfo(module, version) return module_info.collect_
hooks: refactor GObject introspection (gi) hooks The modules imported from gi.repository are marked as runtime modules by their corresponding pre-safe-import-module hooks. Therefore, their standard hooks are always loaded and executed, regardless of whether the modue is actually importable or not. In PyInstaller v5, this behavior triggers errors in hooks for GI modules that are not importable, because the new `isolated` framework propagates the errors instead of swallowing them. While these errors could be caught and demoted to warnings to match the old behavior, it would be better hooks checked whether module is importable before doing any processing at all. To that end, we introduce new class, `GiModuleInfo` that, as part of its initialization, allows us to: - perform availability check - obtain data previously returned by `get_gi_typelibs` - obtain data previously returned by `get_gi_libdir` using a single isolated import attempt (instead of one being performed in each of those steps). In addition, if passed `hook_api` as an optional argument, the `GiModuleInfo` can use hook configuration API to override the GI module version to be collected (which allows the standard use pattern to be removed from the hook itself). The old `get_gi_typelibs` and `get_gi_libdir` functions now internally use `GiModuleInfo` to provide backward compatible behavior to (potential) exetnal user. All `gi` hooks are ported to the `GiModuleInfo` and now become no-op if the module is not available. In addition, hooks are cleaned up/refactored so that all processing is performed either in the loading stage ("simple" hooks that do not require access to hook configuration API) or in the `hook()` function (hooks that require access to hook configuration API), but not in the mixture of the two.
get_gi_typelibs
684bfac8adcf254fec5777f212c13eb62181f900
pyinstaller
gi.py
8
3
https://github.com/pyinstaller/pyinstaller.git
1
22
0
9
37
Python
{ "docstring": "\n Return a tuple of (binaries, datas, hiddenimports) to be used by PyGObject related hooks. Searches for and adds\n dependencies recursively.\n\n :param module: GI module name, as passed to 'gi.require_version()'\n :param version: GI module version, as passed to 'gi.require_version()'\n ", "language": "en", "n_whitespaces": 54, "n_words": 38, "vocab_size": 30 }
def get_gi_typelibs(module, version): module_info = GiModuleInfo(module, version) return module_info.collect_typelib_data()
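A usage sketch as it would appear in a PyInstaller hook; the module/version pair is illustrative and the import path is inferred from this record's file location:
from PyInstaller.utils.hooks.gi import get_gi_typelibs

# e.g. in hook-gi.repository.Gtk.py
binaries, datas, hiddenimports = get_gi_typelibs('Gtk', '3.0')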
7,785
43,002
21
airflow/www/security.py
7
7
def _sync_dag_view_permissions(self, dag_id, access_control): dag
Fix permission issue for dag that has dot in name (#23510) How we determine if a DAG is a subdag in airflow.security.permissions.resource_name_for_dag is not right. If a dag_id contains a dot, the permission is not recorded correctly. The current solution makes a query every time we check for permission for dags that has a dot in the name. Not that I like it but I think it's better than other options I considered such as changing how we name dags for subdag. That's not good in UX. Another option I considered was making a query when parsing, that's not good and it's avoided by passing root_dag to resource_name_for_dag Co-authored-by: Ash Berlin-Taylor <ash_github@firemirror.com> Co-authored-by: Tzu-ping Chung <uranusjr@gmail.com>
_sync_dag_view_permissions
cc35fcaf89eeff3d89e18088c2e68f01f8baad56
airflow
security.py
8
26
https://github.com/apache/airflow.git
7
116
0
7
30
Python
{ "docstring": "\n Set the access policy on the given DAG's ViewModel.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g. {'can_read'})\n ", "language": "en", "n_whitespaces": 82, "n_words": 42, "vocab_size": 33 }
def _sync_dag_view_permissions(self, dag_id, access_control): dag_resource_name = permissions.resource_name_for_dag(dag_id)
40,788
172,199
39
pandas/tests/util/test_assert_series_equal.py
20
16
def test_series_equal_datetime_values_mismatch(rtol): msg = s1 = Series(pd.date_range("2018-01-01", periods=3, freq="D")) s2 = Series(pd.date_range("2019-02-02", periods=3, freq="D")) with pytest.raises(AssertionError, match=msg): tm.a
ENH: Include column for ea comparison in asserters (#50323) * ENH: Include column for ea comparison in asserters * Add gh ref * Fix test * Add gh ref * Split tests
test_series_equal_datetime_values_mismatch
07b363ea8eee184df30b54bfae9acd04511e1cda
pandas
test_assert_series_equal.py
12
11
https://github.com/pandas-dev/pandas.git
1
70
0
16
131
Python
{ "docstring": "Series are different\n\nSeries values are different \\\\(100.0 %\\\\)\n\\\\[index\\\\]: \\\\[0, 1, 2\\\\]\n\\\\[left\\\\]: \\\\[1514764800000000000, 1514851200000000000, 1514937600000000000\\\\]\n\\\\[right\\\\]: \\\\[1549065600000000000, 1549152000000000000, 1549238400000000000\\\\]", "language": "en", "n_whitespaces": 17, "n_words": 21, "vocab_size": 18 }
def test_series_equal_datetime_values_mismatch(rtol): msg = s1 = Series(pd.date_range("2018-01-01", periods=3, freq="D")) s2 = Series(pd.date_range("2019-02-02", periods=3, freq="D")) with pytest.raises(AssertionError, match=msg): tm.assert_series_equal(s1, s2, rtol=rtol)
54,344
216,038
132
tests/pytests/functional/pillar/test_gpg.py
73
16
def test_decrypt_pillar_invalid_renderer(salt_master, grains, pillar_homedir): opts = salt_master.config.copy() opts["decrypt_pillar"] = [{"secrets:vault": "gpg"}] opts["dec
Add tests for gpg decryption failure option Test that: 1. Pillar registers an error when `gpg_decrypt_must_succeed` is `True` and decryption fails 2. The GPG renderer fails silently when `gpg_decrypt_must_succeed` is `False` Also mock `__opts__["gpg_decrypt_must_succeed"]` for gpg renderer unit pytests.
test_decrypt_pillar_invalid_renderer
b856d3225ef1003cbe94499dc8bd82efffabb661
salt
test_gpg.py
10
17
https://github.com/saltstack/salt.git
1
185
0
56
346
Python
{ "docstring": "\n Test decryption using a renderer which is not permitted. It should\n fail, leaving the encrypted keys intact, and add an error to the pillar\n dictionary.\n\n decrypt_pillar_default: foo\n decrypt_pillar_renderers:\n - foo\n - bar\n decrypt_pillar:\n - 'secrets:vault': gpg\n ", "language": "en", "n_whitespaces": 97, "n_words": 36, "vocab_size": 32 }
def test_decrypt_pillar_invalid_renderer(salt_master, grains, pillar_homedir): opts = salt_master.config.copy() opts["decrypt_pillar"] = [{"secrets:vault": "gpg"}] opts["decrypt_pillar_default"] = "foo" opts["decrypt_pillar_renderers"] = ["foo", "bar"] pillar_obj = salt.pillar.Pillar(opts, grains, "test", "base") ret = pillar_obj.compile_pillar() expected = copy.deepcopy(GPG_PILLAR_ENCRYPTED) expected["_errors"] = [ "Failed to decrypt pillar key 'secrets:vault': 'gpg' is not a valid decryption" " renderer. Valid choices are: foo, bar" ] assert ret["_errors"] == expected["_errors"] assert ret["secrets"]["vault"]["foo"] == expected["secrets"]["vault"]["foo"] assert ret["secrets"]["vault"]["bar"] == expected["secrets"]["vault"]["bar"] assert ret["secrets"]["vault"]["baz"] == expected["secrets"]["vault"]["baz"] assert ret["secrets"]["vault"]["qux"] == expected["secrets"]["vault"]["qux"]
@DeveloperAPI
27,515
124,104
49
python/ray/tune/trainable/session.py
14
9
def get_trial_name(): warnings.warn( _deprecation_msg, DeprecationWarning, ) _session = get_session() if _session: return _session.trial_name
[air] update documentation to use `session.report` (#26051) Update documentation to use `session.report`. Next steps: 1. Update our internal caller to use `session.report`. Most importantly, CheckpointManager and DataParallelTrainer. 2. Update `get_trial_resources` to use PGF notions to incorporate the requirement of ResourceChangingScheduler. @Yard1 3. After 2 is done, change all `tune.get_trial_resources` to `session.get_trial_resources` 4. [internal implementation] remove special checkpoint handling logic from huggingface trainer. Optimize the flow for checkpoint conversion with `session.report`. Co-authored-by: Antoni Baum <antoni.baum@protonmail.com>
get_trial_name
ac831fded416381ad3c7fe2ba135eaa4aaab5879
ray
session.py
8
8
https://github.com/ray-project/ray.git
2
26
1
14
49
Python
{ "docstring": "Trial name for the corresponding trial.\n\n For function API use only.\n ", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 11 }
def get_trial_name(): warnings.warn( _deprecation_msg, DeprecationWarning, ) _session = get_session() if _session: return _session.trial_name @DeveloperAPI
36,580
156,139
48
dask/utils.py
23
14
def get_scheduler_lock(collection=None, scheduler=None): from dask import multiprocessing from dask.base import get_scheduler actual_get = get_scheduler(collections=[collection], scheduler=scheduler) if actual_get == multiprocessing.get: return multiprocessing.get_context().Manager().
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
get_scheduler_lock
cccb9d8d8e33a891396b1275c2448c352ef40c27
dask
utils.py
13
7
https://github.com/dask/dask.git
2
61
0
19
100
Python
{ "docstring": "Get an instance of the appropriate lock for a certain situation based on\n scheduler used.", "language": "en", "n_whitespaces": 17, "n_words": 15, "vocab_size": 15 }
def get_scheduler_lock(collection=None, scheduler=None): from dask import multiprocessing from dask.base import get_scheduler actual_get = get_scheduler(collections=[collection], scheduler=scheduler) if actual_get == multiprocessing.get: return multiprocessing.get_context().Manager().Lock() return SerializableLock()
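A brief sketch, assuming a dask array on the default threaded scheduler (which yields a SerializableLock rather than a multiprocessing Manager lock):
import dask.array as da
from dask.utils import get_scheduler_lock

x = da.ones(1000, chunks=100)
lock = get_scheduler_lock(collection=x)
with lock:
    pass  # guard shared state touched from compute tasks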
80,868
271,851
150
keras/engine/training_utils_v1.py
58
16
def verify_dataset_shuffled(x): assert isinstance(x, tf.data.Dataset) graph_def = get_dataset_graph_def(x) for node in graph_def.node: if node.op.startswith("ShuffleDataset"): return True # Also check graph_def.library.function for ds.interleave or ds.flat_map for function in graph_def.library.functi
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
verify_dataset_shuffled
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
training_utils_v1.py
12
15
https://github.com/keras-team/keras.git
6
79
0
45
134
Python
{ "docstring": "Verifies that the dataset is shuffled.\n\n Args:\n x: Dataset passed as an input to the model.\n\n Returns:\n boolean, whether the input dataset is shuffled or not.\n ", "language": "en", "n_whitespaces": 45, "n_words": 26, "vocab_size": 21 }
def verify_dataset_shuffled(x): assert isinstance(x, tf.data.Dataset) graph_def = get_dataset_graph_def(x) for node in graph_def.node: if node.op.startswith("ShuffleDataset"): return True # Also check graph_def.library.function for ds.interleave or ds.flat_map for function in graph_def.library.function: for node in function.node_def: if node.op.startswith("ShuffleDataset"): return True logging.warning( "Expected a shuffled dataset but input dataset `x` is " "not shuffled. Please invoke `shuffle()` on input dataset." ) return False
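A minimal sketch of the intended behavior, assuming an eager TF2 dataset:
import tensorflow as tf

shuffled = tf.data.Dataset.range(100).shuffle(buffer_size=10)
verify_dataset_shuffled(shuffled)                      # True
verify_dataset_shuffled(tf.data.Dataset.range(100))   # False, after logging a warning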
@log_start_end(log=logger)
84,165
282,485
39
gamestonk_terminal/cryptocurrency/due_diligence/binance_model.py
18
12
def get_binance_available_quotes_for_each_coin() -> dict:
Global plot styles (#1228) * Add default stylesheets * Add terminal style helper class and global style initialization in cfg * Style comments and docstrings * Load rich terminal theme from config file * Add application chart styles to candle charts * Add todos * Remove explicit color setting for some ta charts * Add user styles folder to gitignore * Update default stylesheets * Add matplotlib font manager support * Add matplotlib font manager support * Update docstrings and default style * Update stocks candle chart formatting (return fig to style title) * Style common ta overlap view * Make up and down market colors a part of the style helper * Update stylesheets * Style common ta volume view * Style common ta momentum view * Style common ta trend indicators view * Style common ta volatility view * Style common ta volume view * Style common ta custom indicators view * Fix styling bugs and remove the obvious time x lablel * Style charts in the covid menu * Set legend position to upper left in the mpl stylesheet * Add mpl_rcparams configs for parameters not covered by stylesheets * Remove font configuration files * Update style class utility functions * Implement passing external axes and style utility usage in ema & stoch * Add theme watermark and output helpers * Rename style to theme * Update helper usage in ta/ma and ta/stoch * Update style to theme in sample menus * Style forex (#1305) * Make tight layout optional 'cause mplfinance doesn't support it * Apply global style to the forex menu * Update code layout in oanda view and black * Style common TA (#1315) * Make tight layout optional 'cause mplfinance doesn't support it * Apply global style to the forex menu * Add linewidth to theme for use in mpf's addplots * Add vwap to the stocks notebook api * Update common/ta overlap to follow charting style * Apply style on TerminalStyle init * Enable infrastructure for excluding non-trading days from plots * Update notebook api to include there and resolve bandit warning * Update ta/common/overlap to exclude non-trading days * Enable external ax, style and non-trading days in common/ta/momentum * Enable external ax, style and non-trading days in common/ta/trend * Update vwap to the argument naming convention * Enable external ax, style and non-trading days in common/ta/volatility * Enable external ax, style and non-trading days in common/ta/volume * Enable external ax, style and non-trading days in common/ta/custom * Fix controller tests * Forgot to disable rewriting of the cassettes ... 
* Fix controller errors that came up because a merge conflict * Fix price label position on fib * Fix line having wrong x values in fib Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com> * Style economy (#1308) * Began converting * Added alphavan_view * Added CNN View * Updated nasdaq view, fixed glitch * Added fred * Refactored URL * Theo's requested changes * Updated docstrings * Updated tests * Fixed pylint * Fixed tests * Theo changes * Econ Fix * Refactor chart style for Crypto context (#1306) * Remove mock for gff * Mock visualize_output helper function * Refactor * Fix plot helper * Update legend loc * Refactor mplfinance candle plot * Fix errors in the helper function * Fix binbook having the wrong call_ function name * Remove hardcoded style params * Resolve kwargs future warning from pandas * Remove warnings import Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> * funds + custom (#1311) * funds + custom * cleanup cleanup everybody everywhere * Fix external axes conditional and a typo Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> * Add external axes mode to covid charts (#1328) * Add portfolio menu plots (#1318) * Portfolio view plots (commenting out report stuff) * PA Menu broken. Commenting out and fix tests * portfolio optimization * comment out commented api line * Add notes on disabling the pa submenu Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> * Plot updates in common BA (#1335) * Add external axes support to common/ba/finbrain * Add external axes support to common/ba/twitter * Add external axes support to common/ba/google * Add external axes support to common/ba/sentimentinvestor * Add sentimentinvestor to the notebooks API * Fix tests * Etf refactor (#1323) * Refactored no ETF * Fixed gtff import * Fixed tests * Fix pie chart style * Refactored etf/candle * Added pylint fix * Fixed tests * Update candle chart layout * Update etf controller test * Remove strange binary file Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> * Expose ETF candle function in the notebooks API * Common BA and Common QA charts update (#1342) * Add external axes support to common/ba/finbrain * Add external axes support to common/ba/twitter * Add external axes support to common/ba/google * Add external axes support to common/ba/sentimentinvestor * Add sentimentinvestor to the notebooks API * Fix tests * Update stylesheet files * Refactor charts for common/qa * Update the forgotten line plot * Update tests * Add missing arg to a docstring * Remove scientific notation * Black imports Co-authored-by: Minh Hoang <nminh.hoang1023@gmail.com> * Options refactor (#1324) * Fixed alphaquery_view * finished options * Fixed pylint * Fixed tests * Fixed tests * Fixed tests * update yfinance * Tradier + Chartexchange * change mocks from gtff to theme.visualize output * tests Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> Co-authored-by: james <jmaslek11@gmail.com> * Refactor Stocks menu (#1325) * Fix backtesting menu * Refactor comparison analysis * Refactor Dark pool shorts * Refactor rest of menu * Fix test * Fix tests failing * Fix tests fail * Fix test failing * Remove record mode=none to record new output * Rewrite test output * Rewrite test outputs * Adding more rewritten test output * Mock plt.show * Mock missing plt.show * Missing @pytest.mark.vcr * Updating tests : common/behavioural_analysis/finbrain * Improve notebooks API coverage for CA and DPS * Silence annoying flake8 warning Co-authored-by: Chavithra PARANA 
<chavithra@gmail.com> Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> * Charts update for common/pred (#1344) * Add external axes support to common/ba/finbrain * Add external axes support to common/ba/twitter * Add external axes support to common/ba/google * Add external axes support to common/ba/sentimentinvestor * Add sentimentinvestor to the notebooks API * Fix tests * Update stylesheet files * Refactor charts for common/qa * Update the forgotten line plot * Update tests * Add missing arg to a docstring * Style pred helper and controllers * Update ETS plot * Update plots in KNN and pred helper * Update plot and pretty table for arima * Update plot for common/pred/regression * Refactor mc_view * Fix linting * Fix mypy * Move plot title to the axis level to make more vertical space Co-authored-by: Minh Hoang <nminh.hoang1023@gmail.com> Co-authored-by: jmaslek <jmaslek11@gmail.com> * linter * Update common/ba test data * Change etf candle to match stock candle * try updating sia test Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com> Co-authored-by: jmaslek <jmaslek11@gmail.com> Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> Co-authored-by: Minh Hoang <nminh.hoang1023@gmail.com> Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
get_binance_available_quotes_for_each_coin
e1b6022b9cf156ffc0697d0d25a5ed2772ea8d68
OpenBBTerminal
binance_model.py
12
15
https://github.com/OpenBB-finance/OpenBBTerminal.git
2
40
1
16
82
Python
{ "docstring": "Helper methods that for every coin available on Binance add all quote assets. [Source: Binance]\n\n Returns\n -------\n dict:\n All quote assets for given coin\n {'ETH' : ['BTC', 'USDT' ...], 'UNI' : ['ETH', 'BTC','BUSD', ...]\n\n ", "language": "en", "n_whitespaces": 60, "n_words": 34, "vocab_size": 30 }
def get_binance_available_quotes_for_each_coin() -> dict: trading_pairs = _get_trading_pairs() results = defaultdict(list) for pair in trading_pairs: results[pair["baseAsset"]].append(pair["quoteAsset"]) return results @log_start_end(log=logger)
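A brief usage sketch; the returned quote list is illustrative, since actual values depend on Binance's live trading pairs:
quotes = get_binance_available_quotes_for_each_coin()
quotes["ETH"]  # e.g. ['BTC', 'USDT', ...]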
14,688
67,965
54
erpnext/stock/stock_ledger.py
73
17
def update_qty_in_future_sle(args, allow_negative_stock=False): datetime_limit_condition = "" qty_shift = args.actual_qty # find difference/shift in qty caused by stock reconciliation if args.voucher_type == "Stock Reconciliation": qty_shift = get_stock_reco_qty_shift(args) # find the next nearest stock reco so that we only recalculate SLEs till that point next_stock_reco_detail = get_next_stock_reco(args) if next_stock_reco_detail: detail = next_stock_reco_detail[0] # add condi
style: format code with black
update_qty_in_future_sle
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
stock_ledger.py
10
31
https://github.com/frappe/erpnext.git
3
80
0
59
136
Python
{ "docstring": "Recalculate Qty after Transaction in future SLEs based on current SLE.\n\t\tupdate `tabStock Ledger Entry`\n\t\tset qty_after_transaction = qty_after_transaction + {qty_shift}\n\t\twhere\n\t\t\titem_code = %(item_code)s\n\t\t\tand warehouse = %(warehouse)s\n\t\t\tand voucher_no != %(voucher_no)s\n\t\t\tand is_cancelled = 0\n\t\t\tand (timestamp(posting_date, posting_time) > timestamp(%(posting_date)s, %(posting_time)s)\n\t\t\t\tor (\n\t\t\t\t\ttimestamp(posting_date, posting_time) = timestamp(%(posting_date)s, %(posting_time)s)\n\t\t\t\t\tand creation > %(creation)s\n\t\t\t\t)\n\t\t\t)\n\t\t{datetime_limit_condition}\n\t\t", "language": "en", "n_whitespaces": 42, "n_words": 57, "vocab_size": 43 }
def update_qty_in_future_sle(args, allow_negative_stock=False): datetime_limit_condition = "" qty_shift = args.actual_qty # find difference/shift in qty caused by stock reconciliation if args.voucher_type == "Stock Reconciliation": qty_shift = get_stock_reco_qty_shift(args) # find the next nearest stock reco so that we only recalculate SLEs till that point next_stock_reco_detail = get_next_stock_reco(args) if next_stock_reco_detail: detail = next_stock_reco_detail[0] # add condition to update SLEs before this date & time datetime_limit_condition = get_datetime_limit_condition(detail) frappe.db.sql( .format( qty_shift=qty_shift, datetime_limit_condition=datetime_limit_condition ), args, ) validate_negative_qty_in_future_sle(args, allow_negative_stock)
50,273
203,245
80
django/templatetags/tz.py
40
9
def get_current_timezone_tag(parser, token): # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments a
Refs #33476 -- Refactored problematic code before reformatting by Black. In these cases Black produces unexpected results, e.g. def make_random_password( self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789', ): or cursor.execute(""" SELECT ... """, [table name], )
get_current_timezone_tag
c5cd8783825b5f6384417dac5f3889b4210b7d08
django
tz.py
11
7
https://github.com/django/django.git
3
47
0
38
81
Python
{ "docstring": "\n Store the name of the current time zone in the context.\n\n Usage::\n\n {% get_current_timezone as TIME_ZONE %}\n\n This will fetch the currently active time zone and put its name\n into the ``TIME_ZONE`` context variable.\n ", "language": "en", "n_whitespaces": 57, "n_words": 34, "vocab_size": 27 }
def get_current_timezone_tag(parser, token): # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError( "'get_current_timezone' requires 'as variable' (got %r)" % args ) return GetCurrentTimezoneNode(args[2])
@register_agent("fake_report")
47,106
194,834
365
tests/test_train_model.py
55
25
def test_save_multiple_world_logs_mutator(self): with testing_utils.tempdir() as tmpdir: log_report = os.path.join(tmpdir, 'world_logs.jsonl') multitask = 'integration_tests:mutators=flatt
Fixes train_model worldlogging for multitask with mutators. (#4414) * Fixes train_model worldlogging for multitask with mutators. * Fix bug in train_model when evaltask doesn't match task.
test_save_multiple_world_logs_mutator
d6773a0b4acf1027dc9b68342a1d84344f1a0d95
ParlAI
test_train_model.py
14
21
https://github.com/facebookresearch/ParlAI.git
2
113
1
47
207
Python
{ "docstring": "\n Test that we can save multiple world_logs from train model on multiple tasks\n with mutators present.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 15 }
def test_save_multiple_world_logs_mutator(self): with testing_utils.tempdir() as tmpdir: log_report = os.path.join(tmpdir, 'world_logs.jsonl') multitask = 'integration_tests:mutators=flatten,integration_tests:ReverseTeacher:mutator=reverse' valid, test = testing_utils.train_model( { 'task': multitask, 'validation_max_exs': 10, 'model': 'repeat_label', 'short_final_eval': True, 'num_epochs': 1.0, 'world_logs': log_report, } ) for task in multitask.split(','): task_log_report = get_task_world_logs( task, log_report, is_multitask=True ) with PathManager.open(task_log_report) as f: json_lines = f.readlines() assert len(json_lines) == 5 @register_agent("fake_report")
32,293
141,204
151
python/ray/tune/tests/test_trial_relative_logdir.py
36
19
def testDotsInLogdir(self): local_dir_path = Path("/tmp/test_
[tune] Relative logdir paths in trials for ExperimentAnalysis in remote buckets (#25063) When running an experiment for example in the cloud and syncing to a bucket the logdir path in the trials will be changed when working with the checkpoints in the bucket. There are some workarounds, but the easier solution is to also add a rel_logdir containing the relative path to the trials/checkpoints that can handle any changes in the location of experiment results. As discussed with @Yard1 and @krfricke Co-authored-by: Antoni Baum <antoni.baum@protonmail.com> Co-authored-by: Kai Fricke <kai@anyscale.com>
testDotsInLogdir
2a5d322e705df080e9254c9c9a3e187c1ea41c4e
ray
test_trial_relative_logdir.py
14
13
https://github.com/ray-project/ray.git
4
100
0
22
179
Python
{ "docstring": "This should result in errors as dots in paths are not allowed.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
def testDotsInLogdir(self): local_dir_path = Path("/tmp/test_rel_dots") local_dir = str(local_dir_path) if local_dir_path.exists(): local_dir = tempfile.mkdtemp(prefix=str(local_dir_path) + "_") trial = Trial(trainable_name="rel_logdir", local_dir=local_dir) with self.assertRaises(ValueError): trial.logdir = "/tmp/test_rel/../dots" with self.assertRaises(ValueError): trial.logdir = local_dir + "/../" if shutil.rmtree.avoids_symlink_attacks: if local_dir_path.exists(): shutil.rmtree(local_dir)
@override_settings(WAGTAILIMAGES_IMAGE_MODEL="tests.CustomImage")
16,361
75,124
155
wagtail/images/tests/test_admin_views.py
40
25
def test_delete_post(self): # Send request response = self.client.post( reverse("wagtailimages:delete_multiple", args=(self.ima
Reformat with black
test_delete_post
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_admin_views.py
14
12
https://github.com/wagtail/wagtail.git
1
128
1
33
232
Python
{ "docstring": "\n This tests that a POST request to the delete view deletes the image\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
def test_delete_post(self): # Send request response = self.client.post( reverse("wagtailimages:delete_multiple", args=(self.image.id,)) ) # Check response self.assertEqual(response.status_code, 200) self.assertEqual(response["Content-Type"], "application/json") # Make sure the image is deleted self.assertFalse(Image.objects.filter(id=self.image.id).exists()) # Check JSON response_json = json.loads(response.content.decode()) self.assertIn("image_id", response_json) self.assertIn("success", response_json) self.assertEqual(response_json["image_id"], self.image.id) self.assertTrue(response_json["success"]) @override_settings(WAGTAILIMAGES_IMAGE_MODEL="tests.CustomImage")
46,030
189,389
329
tests/utils/GraphicalUnitTester.py
106
31
def _show_diff_helper(self, frame_data, expected_frame_data): import matplotlib.gridspec as gridspec # type: ignore import matplotlib.pyplot as plt gs = gridspec.GridSpec(2, 2) fig = plt.figure() fig.suptitle(f"Test for {str(self.scene
Added MyPy Support (#1972) * MyPy Support * MyPy Hook * Removing MyPy Hook * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Delete __init__.pyi * Delete color.pyi * Update .mypy.ini Co-authored-by: Christopher Besch <christopher.besch@gmx.de> * changes * quick fix * MyPy Hook * MyPy Hook Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christopher Besch <christopher.besch@gmx.de>
_show_diff_helper
c4217731e08470d5a56cf02cf76cae01c03fb78f
manim
GraphicalUnitTester.py
14
28
https://github.com/ManimCommunity/manim.git
1
240
0
69
407
Python
{ "docstring": "Will visually display with matplotlib differences between frame generated and the one expected.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
def _show_diff_helper(self, frame_data, expected_frame_data): import matplotlib.gridspec as gridspec # type: ignore import matplotlib.pyplot as plt gs = gridspec.GridSpec(2, 2) fig = plt.figure() fig.suptitle(f"Test for {str(self.scene).replace('Test', '')}", fontsize=16) ax = fig.add_subplot(gs[0, 0]) ax.imshow(frame_data) ax.set_title("Generated :") ax = fig.add_subplot(gs[0, 1]) ax.imshow(expected_frame_data) ax.set_title("Expected :") ax = fig.add_subplot(gs[1, :]) diff_im = expected_frame_data.copy() diff_im = np.where( frame_data != np.array([0, 0, 0, 255]), np.array([0, 255, 0, 255], dtype="uint8"), np.array([0, 0, 0, 255], dtype="uint8"), ) # Set any non-black pixels to green np.putmask( diff_im, expected_frame_data != frame_data, np.array([255, 0, 0, 255], dtype="uint8"), ) # Set any different pixels to red ax.imshow(diff_im, interpolation="nearest") ax.set_title("Differences summary : (green = same, red = different)") plt.show() plt.savefig(f"{self.scene}.png")
17,326
82,188
84
awx/main/scheduler/task_manager_models.py
30
8
def consume_capacity(self, task): if self.is_container_gr
Add max concurrent jobs and max forks per ig The intention of this feature is primarily to provide some notion of max capacity of container groups, but the logic I've left generic. Default is 0, which will be interpreted as no maximum number of jobs or forks. Includes refactor of variable and method names for clarity. instances_by_hostname is an internal attribute of TaskManagerInstances. Clarify when we are expecting the actual TaskManagerInstances object. Unify how we process running tasks and consume capacity. This has the effect that we do less expensive work in after_lock_init and have one fewer loop over all the running tasks. Previously we looped for both building the dependency graph as well as for calculating the starting capacity of all the instances and instance groups. Now we achieve both tasks in the same loop. Because of how this changes the somewhat subtle "do-si-do" of how to initialize the Task Manager models, introduce a wrapper class that tries to take some of that burden off of other areas where we re-use this like in the serializer and the metrics. Also use this wrapper class to handle niceties of how to track capacity consumption on instances and instance groups. Add tests for max_forks and max_concurrent_jobs Fixup tests that use TaskManagerModels to accommodate changes. assign ig before call to consume capacity if we don't do it in that order, then we don't correctly account for the container group jobs we are starting in the middle of the task manager run
consume_capacity
86856f242aec6051c1cace683fe1761c0775babb
awx
task_manager_models.py
11
6
https://github.com/ansible/awx.git
2
32
0
28
56
Python
{ "docstring": "We only consume capacity on an instance group level if it is a container group. Otherwise we consume capacity on an instance level.", "language": "en", "n_whitespaces": 22, "n_words": 23, "vocab_size": 18 }
def consume_capacity(self, task): if self.is_container_group: self.container_group_jobs += 1 self.container_group_consumed_forks += task.task_impact else: raise RuntimeError("We only track capacity for container groups at the instance group level. Otherwise, consume capacity on instances.")
31,216
137,681
25
python/ray/util/spark/utils.py
9
8
def get_avail_mem_per_ray_worker_node(spark, object_store_memory_per_node): num_cpus_per_spark_task = int( spark.sparkContext.getConf().get("spark.task.cpus", "1") )
Ray on spark implementation (#28771) REP: ray-project/enhancements#14
get_avail_mem_per_ray_worker_node
e76ccee69aaa7583be1a9d81cf7b2aa72cf25647
ray
utils.py
13
20
https://github.com/ray-project/ray.git
2
83
0
9
49
Python
{ "docstring": "\n Return the available heap memory and object store memory for each ray worker.\n NB: We have one ray node per spark task.\n ", "language": "en", "n_whitespaces": 32, "n_words": 22, "vocab_size": 20 }
def get_avail_mem_per_ray_worker_node(spark, object_store_memory_per_node): num_cpus_per_spark_task = int( spark.sparkContext.getConf().get("spark.task.cpus", "1") )
3,751
21,285
214
pipenv/patched/notpip/_internal/metadata/importlib/_dists.py
44
10
def _iter_egg_info_dependencies(self) -> Iterable[str]: for entry i
Vendor in pip 22.1.2
_iter_egg_info_dependencies
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
pipenv
_dists.py
16
26
https://github.com/pypa/pipenv.git
7
69
0
30
161
Python
{ "docstring": "Get distribution dependencies from the egg-info directory.\n\n To ease parsing, this converts a legacy dependency entry into a PEP 508\n requirement string. Like ``_iter_requires_txt_entries()``, there is code\n in ``importlib.metadata`` that does mostly the same, but not do exactly\n what we need.\n\n Namely, ``importlib.metadata`` does not normalize the extra name before\n putting it into the requirement string, which causes marker comparison\n to fail because the dist-info format do normalize. This is consistent in\n all currently available PEP 517 backends, although not standardized.\n ", "language": "en", "n_whitespaces": 144, "n_words": 81, "vocab_size": 66 }
def _iter_egg_info_dependencies(self) -> Iterable[str]: for entry in self._iter_requires_txt_entries(): if entry.extra and entry.marker: marker = f'({entry.marker}) and extra == "{safe_extra(entry.extra)}"' elif entry.extra: marker = f'extra == "{safe_extra(entry.extra)}"' elif entry.marker: marker = entry.marker else: marker = "" if marker: yield f"{entry.requirement} ; {marker}" else: yield entry.requirement
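For illustration, a comment-only trace of the conversion this generator performs, with hypothetical entry values:
# entry.requirement = 'requests>=2.0', entry.extra = 'socks',
# entry.marker = 'python_version < "3.10"'
# yields: 'requests>=2.0 ; (python_version < "3.10") and extra == "socks"'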
55,071
218,009
29
python3.10.4/Lib/imp.py
9
7
def cache_from_source(path, debug_override=None): with warnings.catch_warnings(): warnings.simplefilter('ignore') return util.cache_from_source(path, debug_override)
add python 3.10.4 for windows
cache_from_source
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
imp.py
10
4
https://github.com/XX-net/XX-Net.git
1
32
0
9
57
Python
{ "docstring": "**DEPRECATED**\n\n Given the path to a .py file, return the path to its .pyc file.\n\n The .py file does not need to exist; this simply returns the path to the\n .pyc file calculated as if the .py file were imported.\n\n If debug_override is not None, then it must be a boolean and is used in\n place of sys.flags.optimize.\n\n If sys.implementation.cache_tag is None then NotImplementedError is raised.\n\n ", "language": "en", "n_whitespaces": 87, "n_words": 66, "vocab_size": 45 }
def cache_from_source(path, debug_override=None): with warnings.catch_warnings(): warnings.simplefilter('ignore') return util.cache_from_source(path, debug_override)
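A short usage sketch; the exact cache tag in the result depends on the running interpreter:
import imp

imp.cache_from_source('/path/to/module.py')
# e.g. '/path/to/__pycache__/module.cpython-310.pyc'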
54,998
217,895
287
python3.10.4/Lib/http/server.py
112
19
def _url_collapse_path(path): # Query componen
add python 3.10.4 for windows
_url_collapse_path
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
server.py
14
25
https://github.com/XX-net/XX-Net.git
10
151
0
71
281
Python
{ "docstring": "\n Given a URL path, remove extra '/'s and '.' path elements and collapse\n any '..' references and returns a collapsed path.\n\n Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.\n The utility of this function is limited to is_cgi method and helps\n preventing some security attacks.\n\n Returns: The reconstituted URL, which will always start with a '/'.\n\n Raises: IndexError if too many '..' occur within the path.\n\n ", "language": "en", "n_whitespaces": 95, "n_words": 70, "vocab_size": 60 }
def _url_collapse_path(path): # Query component should not be involved. path, _, query = path.partition('?') path = urllib.parse.unquote(path) # Similar to os.path.split(os.path.normpath(path)) but specific to URL # path semantics rather than local operating system semantics. path_parts = path.split('/') head_parts = [] for part in path_parts[:-1]: if part == '..': head_parts.pop() # IndexError if more '..' than prior parts elif part and part != '.': head_parts.append( part ) if path_parts: tail_part = path_parts.pop() if tail_part: if tail_part == '..': head_parts.pop() tail_part = '' elif tail_part == '.': tail_part = '' else: tail_part = '' if query: tail_part = '?'.join((tail_part, query)) splitpath = ('/' + '/'.join(head_parts), tail_part) collapsed_path = "/".join(splitpath) return collapsed_path nobody = None
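Worked examples traced from the implementation above:
_url_collapse_path('/a/b/../c')  # '/a/c'
_url_collapse_path('/a/./b/')    # '/a/b/' (trailing slash preserved)
_url_collapse_path('/..')        # raises IndexError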
40,402
169,224
72
pandas/core/arrays/sparse/accessor.py
26
16
def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False): from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo A, rows, columns = sparse_series_to_coo( self._parent, row_levels, column_levels, sort_labels=sort_labels ) return A, rows, columns
TYP: type all arguments with bool default values (#48624) * TYP: type all arguments with bool default values * bool_t * ignore type error in pandas/core/arrays/sparse/accessor.py
to_coo
5c66e65d7b9fef47ccb585ce2fd0b3ea18dc82ea
pandas
accessor.py
9
80
https://github.com/pandas-dev/pandas.git
1
64
0
22
89
Python
{ "docstring": "\n Create a scipy.sparse.coo_matrix from a Series with MultiIndex.\n\n Use row_levels and column_levels to determine the row and column\n coordinates respectively. row_levels and column_levels are the names\n (labels) or numbers of the levels. {row_levels, column_levels} must be\n a partition of the MultiIndex level names (or numbers).\n\n Parameters\n ----------\n row_levels : tuple/list\n column_levels : tuple/list\n sort_labels : bool, default False\n Sort the row and column labels before forming the sparse matrix.\n When `row_levels` and/or `column_levels` refer to a single level,\n set to `True` for a faster execution.\n\n Returns\n -------\n y : scipy.sparse.coo_matrix\n rows : list (row labels)\n columns : list (column labels)\n\n Examples\n --------\n >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])\n >>> s.index = pd.MultiIndex.from_tuples(\n ... [\n ... (1, 2, \"a\", 0),\n ... (1, 2, \"a\", 1),\n ... (1, 1, \"b\", 0),\n ... (1, 1, \"b\", 1),\n ... (2, 1, \"b\", 0),\n ... (2, 1, \"b\", 1)\n ... ],\n ... names=[\"A\", \"B\", \"C\", \"D\"],\n ... )\n >>> s\n A B C D\n 1 2 a 0 3.0\n 1 NaN\n 1 b 0 1.0\n 1 3.0\n 2 1 b 0 NaN\n 1 NaN\n dtype: float64\n\n >>> ss = s.astype(\"Sparse\")\n >>> ss\n A B C D\n 1 2 a 0 3.0\n 1 NaN\n 1 b 0 1.0\n 1 3.0\n 2 1 b 0 NaN\n 1 NaN\n dtype: Sparse[float64, nan]\n\n >>> A, rows, columns = ss.sparse.to_coo(\n ... row_levels=[\"A\", \"B\"], column_levels=[\"C\", \"D\"], sort_labels=True\n ... )\n >>> A\n <3x4 sparse matrix of type '<class 'numpy.float64'>'\n with 3 stored elements in COOrdinate format>\n >>> A.todense()\n matrix([[0., 0., 1., 3.],\n [3., 0., 0., 0.],\n [0., 0., 0., 0.]])\n\n >>> rows\n [(1, 1), (1, 2), (2, 1)]\n >>> columns\n [('a', 0), ('a', 1), ('b', 0), ('b', 1)]\n ", "language": "en", "n_whitespaces": 936, "n_words": 279, "vocab_size": 148 }
def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False): from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo A, rows, columns = sparse_series_to_coo( self._parent, row_levels, column_levels, sort_labels=sort_labels ) return A, rows, columns
47,255
195,349
171
projects/bb3/agents/r2c2_bb3_agent.py
28
9
def _get_memory_heuristic_values(self) -> Dict[str, Union[str, float, bool]]: return { 'ignore_in_session_memories': self.opt.get( 'ignore_in_session_memories_mkm', False ), 'memory_overlap_threshold': self.opt.get('memory_overlap_threshold', 0.0), 'memory_hard_block_for_n_turns': self.opt.get( 'memory_hard_block_for_n_turns', 0
[BB3] Memory Heuristics (#4770) * memory heuristics * small changes * address comments * fix config * reqs
_get_memory_heuristic_values
58b6977a9cb45a91d78aabdc3c5538f873829a9f
ParlAI
r2c2_bb3_agent.py
10
16
https://github.com/facebookresearch/ParlAI.git
1
79
0
24
123
Python
{ "docstring": "\n Extract heuristics from self.opt.\n ", "language": "en", "n_whitespaces": 19, "n_words": 4, "vocab_size": 4 }
def _get_memory_heuristic_values(self) -> Dict[str, Union[str, float, bool]]: return { 'ignore_in_session_memories': self.opt.get( 'ignore_in_session_memories_mkm', False ), 'memory_overlap_threshold': self.opt.get('memory_overlap_threshold', 0.0), 'memory_hard_block_for_n_turns': self.opt.get( 'memory_hard_block_for_n_turns', 0 ), 'memory_soft_block_decay_factor': self.opt.get( 'memory_soft_block_decay_factor', 0.0 ), }
78,293
266,099
13
netbox/extras/templatetags/plugins.py
7
4
def plugin_list_buttons(context, model): return _
4751 Enable plugins to inject content within object list views (#10901) * 4751 add plugin buttons to list templates * 4751 add plugin buttons to list templates * 4751 add documentation * 4751 fix object reference * 4751 update docs
plugin_list_buttons
27bf7b4a9add27b4f3f8b0f4fd5dfc4cfe74a65b
netbox
plugins.py
8
2
https://github.com/netbox-community/netbox.git
1
17
0
7
29
Python
{ "docstring": "\n Render all list buttons registered by plugins\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
def plugin_list_buttons(context, model): return _get_registered_content(model, 'list_buttons', context)
10,052
50,215
157
modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/droppath.py
73
16
def drop_path(self, inputs): # if prob is 0 or eval mode, return original input if self.drop_prob == 0. or not self.training: return inputs
add disco_diffusion_ernievil_base
drop_path
ffcde21305c61d950a9f93e57e6180c9a9665b87
PaddleHub
droppath.py
11
10
https://github.com/PaddlePaddle/PaddleHub.git
3
101
0
48
162
Python
{ "docstring": "drop path op\n Args:\n input: tensor with arbitrary shape\n drop_prob: float number of drop path probability, default: 0.0\n training: bool, if current mode is training, default: False\n Returns:\n output: output tensor after drop path\n ", "language": "en", "n_whitespaces": 99, "n_words": 34, "vocab_size": 28 }
def drop_path(self, inputs): # if prob is 0 or eval mode, return original input if self.drop_prob == 0. or not self.training: return inputs keep_prob = 1 - self.drop_prob keep_prob = paddle.to_tensor(keep_prob, dtype='float32') shape = (inputs.shape[0], ) + (1, ) * (inputs.ndim - 1) # shape=(N, 1, 1, 1) random_tensor = keep_prob + paddle.rand(shape, dtype=inputs.dtype) random_tensor = random_tensor.floor() # mask output = inputs.divide(keep_prob) * random_tensor # divide is to keep the same output expectation return output
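A hedged sketch, assuming this method lives on a DropPath layer class defined in the same module (the class name is an assumption):
import paddle

x = paddle.ones([4, 3, 8, 8])
layer = DropPath(drop_prob=0.2)  # assumed class name
layer.eval()                    # training == False, so drop_path is a no-op
out = layer.drop_path(x)
assert bool(paddle.all(out == x))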
7,473
42,069
17
seaborn/rcmod.py
8
8
def set_style(style=None, rc=None): style_object = axes_style
Convert docs to pydata-sphinx-theme and add new material (#2842) * Do basic conversion of site to pydata_sphinx_theme * Remove some pae structure customizations we no longer need * Add some custom CSS * Tweak a few more colors * Remove vestigial div closing tag * Reorganize release notes into hierarchical pages * Rebuild full docs and fix some resulting issues * Make release note doc refs absolute * Convert homepage to use sphinx-design instead of hand-crafted html * Remove original custom css * Simplify header and put archive switcher in footer * Streamline API docs for objects * Play around with templates to fix shrinking content (not perfect yet) * Improve use of horizontal space without sidebars * Various tweaks * Convert tutorial homepage source to native sphinx-design directives * Move intro page into tutorial * More tweaks * Tweak theme colors and footer * Remove reference to navbar version * Note that error bar tutorial demonstrates new features as of v0.12 * Update layout customization for new theme features * Various layout and CSS tweaks * Narrow support guidance to StackOverflow * Run all notebooks * Adapt to new dropdown navbar in pydata theme * Separate tutorial source and outputs * Separate dostring source and outputs * Add scale API template * Update API docs * Fix requirements * Add new objects * Point doc requirements at v0.10 RC for theme
set_style
34662f4be5c364e7518f9c1118c9b362038ee5dd
seaborn
rcmod.py
8
3
https://github.com/mwaskom/seaborn.git
1
28
0
8
46
Python
{ "docstring": "\n Set the parameters that control the general style of the plots.\n\n The style parameters control properties like the color of the background and\n whether a grid is enabled by default. This is accomplished using the\n matplotlib rcParams system.\n\n The options are illustrated in the\n :doc:`aesthetics tutorial <../tutorial/aesthetics>`.\n\n See :func:`axes_style` to get the parameter values.\n\n Parameters\n ----------\n style : dict, or one of {darkgrid, whitegrid, dark, white, ticks}\n A dictionary of parameters or the name of a preconfigured style.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n style dictionaries. This only updates parameters that are\n considered part of the style definition.\n\n Examples\n --------\n\n .. include:: ../docstrings/set_style.rst\n\n ", "language": "en", "n_whitespaces": 185, "n_words": 111, "vocab_size": 76 }
def set_style(style=None, rc=None):
    style_object = axes_style(style, rc)
    mpl.rcParams.update(style_object)
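Typical call pattern (a small usage sketch; the rc override key shown is a standard matplotlib param picked for illustration):

import seaborn as sns

sns.set_style("whitegrid", rc={"grid.linestyle": "--"})  # preset plus targeted override
print(sns.axes_style("whitegrid")["axes.grid"])          # True: the dict set_style applies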
6,252
34,302
247
src/transformers/models/vilt/feature_extraction_vilt.py
97
23
def _resize(self, image, shorter=800, longer=1333, size_divisor=32, resample=Image.BICUBIC): if not isinstance(image, Image.Image): image = self.to_pil_image(image) w, h = image.size min_size = shorter max_size = longer
Add ViLT (#14895) * First commit * Add conversion script * Make conversion script work for base model * More improvements * Update conversion script, works for vqa * Add indexing argument to meshgrid * Make conversion script work for ViltForPreTraining * Add ViltForPreTraining to docs * Fix device issue * Add processor * Add MinMaxResize to feature extractor * Implement call method of ViltProcessor * Fix tests * Add integration test * Add loss calculation for VQA * Improve tests * Improve some more tests * Debug tests * Small improvements * Add support for attention_mask * Remove mask_it * Add pixel_mask * Add tests for ViltFeatureExtractor * Improve tests * Add ViltForNaturalLanguageVisualReasoning * Add ViltForNaturalLanguageVisualReasoning to conversion script * Minor fixes * Add support for image_embeds, update docstrings to markdown * Update docs to markdown * Improve conversion script * Rename ViltForPreTraining to ViltForMaskedLM * Improve conversion script * Convert docstrings to markdown * Fix code example of retrieval model * Properly convert masked language model * Add integration test for nlvr * Fix code quality * Apply suggestions from code review * Add copied from statements * Fix pretrained_config_archive_map * Fix docs * Add model to README * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Apply more suggestions from code review * Make code more readable * Add ViltForNaturalLanguageVisualReasoning to the tests * Rename ViltForVisualQuestionAnswering to ViltForQuestionAnswering * Replace pixel_values_2 by single tensor * Add hidden_states and attentions * Fix one more test * Fix all tests * Update year * Fix rebase issues * Fix another rebase issue * Remove ViltForPreTraining from auto mapping * Rename ViltForImageRetrievalTextRetrieval to ViltForImageAndTextRetrieval * Make it possible to use BertTokenizerFast in the processor * Use BertTokenizerFast by default * Rename ViltForNaturalLanguageVisualReasoning, define custom model output Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
_resize
ac227093e41cecb07c7e0f2fc9a504850907bd06
transformers
feature_extraction_vilt.py
11
18
https://github.com/huggingface/transformers.git
4
169
0
52
266
Python
{ "docstring": "\n Resizes the shorter edge of `image` to `shorter` and limits the longer edge to under `longer`, while preserving\n the aspect ratio. Also makes sure that both the height and width can be divided by `size_divisor`.\n\n Based on original implementation:\n https://github.com/dandelin/ViLT/blob/3db8b5035464afee84d951bf6322e1b27f1d072d/vilt/transforms/utils.py#L5\n\n Args:\n image (`PIL.Image`):\n The image to resize.\n shorter (`int`, *optional*, defaults to `800`):\n The size to which to resize the shorter side of the image.\n longer (`int`, *optional*, defaults to `1333`):\n The size by which to limit the longer side of the image, while preserving the aspect ratio.\n size_divisor (`int`, *optional*, defaults to `32`):\n The size by which both the height and the width must be divisible.\n resample (`int`, *optional*, defaults to `PIL.Image.BICUBIC`):\n An optional resampling filter.\n ", "language": "en", "n_whitespaces": 290, "n_words": 117, "vocab_size": 61 }
def _resize(self, image, shorter=800, longer=1333, size_divisor=32, resample=Image.BICUBIC):
    if not isinstance(image, Image.Image):
        image = self.to_pil_image(image)
    w, h = image.size
    min_size = shorter
    max_size = longer
    scale = min_size / min(w, h)
    if h < w:
        newh, neww = min_size, scale * w
    else:
        newh, neww = scale * h, min_size
    if max(newh, neww) > max_size:
        scale = max_size / max(newh, neww)
        newh = newh * scale
        neww = neww * scale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    newh, neww = newh // size_divisor * size_divisor, neww // size_divisor * size_divisor
    return self.resize(image, size=(neww, newh), resample=resample)
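The geometry can be checked standalone. A sketch of just the size computation (hypothetical helper name; it mirrors the steps above under the same defaults):

def min_max_resize_dims(w, h, shorter=800, longer=1333, size_divisor=32):
    # Scale so the short side hits `shorter`, cap the long side at `longer`,
    # then round both dims down to multiples of `size_divisor`.
    scale = shorter / min(w, h)
    if h < w:
        newh, neww = shorter, scale * w
    else:
        newh, neww = scale * h, shorter
    if max(newh, neww) > longer:
        rescale = longer / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return neww // size_divisor * size_divisor, newh // size_divisor * size_divisor

print(min_max_resize_dims(1920, 1080))  # (1312, 736): both divisible by 32, long side <= 1333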
31,708
139,470
39
rllib/policy/dynamic_tf_policy_v2.py
11
9
def extra_action_out_fn(self) -> Dict[str, TensorType]: extra_action_fetches = super().extra_action_out_fn() extra_action_fetches.update(self._policy_extra_action_fetches) return extra_action_fetches
[RLlib] Introduce new policy base classes. (#24742)
extra_action_out_fn
bc3a1d35cf6e9a5fd7eef908a8e76aefb80ce6a9
ray
dynamic_tf_policy_v2.py
10
10
https://github.com/ray-project/ray.git
1
32
0
10
54
Python
{ "docstring": "Extra values to fetch and return from compute_actions().\n\n Returns:\n Dict[str, TensorType]: An extra fetch-dict to be passed to and\n returned from the compute_actions() call.\n ", "language": "en", "n_whitespaces": 65, "n_words": 24, "vocab_size": 20 }
def extra_action_out_fn(self) -> Dict[str, TensorType]:
    extra_action_fetches = super().extra_action_out_fn()
    extra_action_fetches.update(self._policy_extra_action_fetches)
    return extra_action_fetches
22,019
104,904
53
src/datasets/builder.py
14
13
def get_all_exported_dataset_infos(cls) -> dict: dset_infos_file_path = os.path.join(cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME) if os.path.exists(dset_infos_file_path): return DatasetInfosDict.from_directory(cls.get_imported_module_dir()
Add API code examples for Builder classes (#4313) * 📝 add examples for builder classes * 📝 apply quentin review
get_all_exported_dataset_infos
d1d4f1065fd4ab91b2c8682643dbd12f86d66fcd
datasets
builder.py
11
16
https://github.com/huggingface/datasets.git
2
50
0
13
96
Python
{ "docstring": "Empty dict if doesn't exist\n\n Example:\n\n ```py\n >>> from datasets import load_dataset_builder\n >>> ds_builder = load_dataset_builder('rotten_tomatoes')\n >>> ds_builder.get_all_exported_dataset_infos()\n {'default': DatasetInfo(description=\"Movie Review Dataset.\\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\\nsentiment categorization with respect to rating scales.'', Proceedings of the\\nACL, 2005.\\n\", citation='@InProceedings{Pang+Lee:05a,\\n author = {Bo Pang and Lillian Lee},\\n title = {Seeing stars: Exploiting class relationships for sentiment\\n categorization with respect to rating scales},\\n booktitle = {Proceedings of the ACL},\\n year = 2005\\n}\\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)}\n ```\n ", "language": "en", "n_whitespaces": 241, "n_words": 140, "vocab_size": 116 }
def get_all_exported_dataset_infos(cls) -> dict:
    dset_infos_file_path = os.path.join(cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME)
    if os.path.exists(dset_infos_file_path):
        return DatasetInfosDict.from_directory(cls.get_imported_module_dir())
    return {}
118,370
323,124
174
paddlenlp/trainer/trainer_args.py
44
20
def to_sanitized_dict(self) -> Dict[str, Any]: d = self.to_dict() d = { ** d, ** { "train_batch_size": self.train_batch_size, "eval_batch_size": self.eval_batch_size } } valid_types = [bool, int,
[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761) * add some datasets for finetune. * support fine tune for all tastks. * add trainer prototype. * init verison for paddlenlp trainer. * refine trainer. * update for some details. * support multi-cards training evaluation. * support load from ckpt. * support for export inference model. * first version of trainer. * seq cls support clue. * trainer support for token classification and question answersing tasks. * fix as reviews. Co-authored-by: Zeyu Chen <chenzeyu01@baidu.com>
to_sanitized_dict
44a290e94d1becd1f09fddc3d873f9e19c9d6919
PaddleNLP
trainer_args.py
11
17
https://github.com/PaddlePaddle/PaddleNLP.git
3
88
0
33
138
Python
{ "docstring": "\n Sanitized serialization to use with TensorBoard’s hparams\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
def to_sanitized_dict(self) -> Dict[str, Any]:
    d = self.to_dict()
    d = {
        **d,
        **{
            "train_batch_size": self.train_batch_size,
            "eval_batch_size": self.eval_batch_size,
        },
    }
    valid_types = [bool, int, float, str]
    valid_types.append(paddle.Tensor)
    return {
        k: v if type(v) in valid_types else str(v)
        for k, v in d.items()
    }
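The sanitization rule is easy to demonstrate in isolation (illustrative values; TensorBoard hparams only accept bool/int/float/str, so anything else is stringified):

valid_types = (bool, int, float, str)
raw = {"lr": 3e-4, "use_amp": True, "layers": [2, 2, 6, 2]}
print({k: v if isinstance(v, valid_types) else str(v) for k, v in raw.items()})
# {'lr': 0.0003, 'use_amp': True, 'layers': '[2, 2, 6, 2]'}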
56,252
221,182
27
python3.10.4/Lib/bz2.py
6
5
def readinto(self, b): self._check_can_read()
add python 3.10.4 for windows
readinto
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
bz2.py
8
3
https://github.com/XX-net/XX-Net.git
1
22
0
6
38
Python
{ "docstring": "Read bytes into b.\n\n Returns the number of bytes read (0 for EOF).\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 12 }
def readinto(self, b):
    self._check_can_read()
    return self._buffer.readinto(b)
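Usage sketch for the buffer-filling read (assumes a bz2 file exists at the given path):

import bz2

with bz2.open("data.bz2", "rb") as f:
    buf = bytearray(4096)
    n = f.readinto(buf)        # number of bytes read; 0 means EOF
    payload = bytes(buf[:n])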
50,997
205,032
557
django/db/backends/oracle/base.py
126
18
def _output_type_handler(cursor, name, defaultType, length, precision, scale): if defaultType == Database.NUMBER: if scale == -127: if precision == 0: # NUMBER column: decimal-precision floating point. # This will normally be an integer from a sequence, # but it could be a decimal value. outconverter = FormatStylePlaceholderCursor._output_number_converter else: # FLOAT column: binary-precision floating point. # This comes from FloatField columns. outconverter = float elif precision > 0: # NUMBER(p,s) column: decimal-precision fixed point. # This comes from IntegerField and DecimalField columns. outconverter = FormatStylePlaceholderCursor._get_decimal_converter(
Refs #33476 -- Reformatted code with Black.
_output_type_handler
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
base.py
13
19
https://github.com/django/django.git
5
90
0
77
147
Python
{ "docstring": "\n Called for each db column fetched from cursors. Return numbers as the\n appropriate Python type.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
def _output_type_handler(cursor, name, defaultType, length, precision, scale):
    if defaultType == Database.NUMBER:
        if scale == -127:
            if precision == 0:
                # NUMBER column: decimal-precision floating point.
                # This will normally be an integer from a sequence,
                # but it could be a decimal value.
                outconverter = FormatStylePlaceholderCursor._output_number_converter
            else:
                # FLOAT column: binary-precision floating point.
                # This comes from FloatField columns.
                outconverter = float
        elif precision > 0:
            # NUMBER(p,s) column: decimal-precision fixed point.
            # This comes from IntegerField and DecimalField columns.
            outconverter = FormatStylePlaceholderCursor._get_decimal_converter(
                precision, scale
            )
        else:
            # No type information. This normally comes from a
            # mathematical expression in the SELECT list. Guess int
            # or Decimal based on whether it has a decimal point.
            outconverter = FormatStylePlaceholderCursor._output_number_converter
        return cursor.var(
            Database.STRING,
            size=255,
            arraysize=cursor.arraysize,
            outconverter=outconverter,
        )
21,771
104,101
365
src/datasets/features/features.py
121
21
def decode_nested_example(schema, obj): # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): return { k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in utils.zip_dict(schema, obj) } elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if decode_nested_example(sub_schema, first_elmt) != first_elmt: return [decode_nested_example(sub_schema, o) for o in obj] return list(obj) elif isinstance(schema, Sequence): # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): return {k: decode_nested_example([schema.feature[k]
Add Arrow type casting to struct for Image and Audio + Support nested casting (#3575) * add storage cast * implement dict cast for image * factorize extension type creation for audio and image + implement type cast for thos custom types * fix tests * style * [big] allow extension array in nested arrays * docs * style * fix Features pickling * fix some tests * fix more tests * fix more tests * add base extensionarray for pyarrow<6 * add extensionarray for pyarrow<6 * add soundfile to tests requirements * minor * remove not implemented error for complex casting in pyarrow 3 * style * style again * add casting for fixed size lists * add libsndfile1 in the linux CI * style * typo * start adding new tests just to notice the concatenation issue... * [big] remove extension types + move cast_storage to the Image and Audio classes * minor * fix test * style * add more tests to image * add audio tests * support casting from null array * fix field names verifications when casting * docs + tests * use the new table_cast on pyarrow tables * whoops forgot one line * remove unused string handling in Image.decode_example * update tests accordingly
decode_nested_example
6ca96c707502e0689f9b58d94f46d871fa5a3c9c
datasets
features.py
18
25
https://github.com/huggingface/datasets.git
15
207
0
79
310
Python
{ "docstring": "Decode a nested example.\n This is used since some features (in particular Audio and Image) have some logic during decoding.\n\n To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.\n If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.\n ", "language": "en", "n_whitespaces": 85, "n_words": 73, "vocab_size": 57 }
def decode_nested_example(schema, obj):
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(schema, dict):
        return {
            k: decode_nested_example(sub_schema, sub_obj)
            for k, (sub_schema, sub_obj) in utils.zip_dict(schema, obj)
        }
    elif isinstance(schema, (list, tuple)):
        sub_schema = schema[0]
        if obj is None:
            return None
        else:
            if len(obj) > 0:
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
                        break
                if decode_nested_example(sub_schema, first_elmt) != first_elmt:
                    return [decode_nested_example(sub_schema, o) for o in obj]
            return list(obj)
    elif isinstance(schema, Sequence):
        # We allow to reverse list of dict => dict of list for compatibility with tfds
        if isinstance(schema.feature, dict):
            return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature}
        else:
            return decode_nested_example([schema.feature], obj)
    # Object with special decoding:
    elif isinstance(schema, (Audio, Image)):
        return schema.decode_example(obj) if obj is not None else None
    return obj
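A stripped-down illustration of the same recursion over plain dict/list schemas (hypothetical helper; it omits the datasets-specific Sequence/Audio/Image types and the first-element short-circuit):

def decode_nested(schema, obj):
    # Walk dicts and lists in lockstep with the schema; apply leaf decoders.
    if isinstance(schema, dict):
        return {k: decode_nested(schema[k], obj[k]) for k in schema}
    if isinstance(schema, list):
        return None if obj is None else [decode_nested(schema[0], o) for o in obj]
    return schema(obj) if callable(schema) else obj

print(decode_nested({"a": [str.upper]}, {"a": ["x", "y"]}))  # {'a': ['X', 'Y']}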
41,541
175,013
14
src/pip/_internal/utils/virtualenv.py
8
4
def running_under_virtualenv() -> bool: return _running_und
Name virtualenv<20 as "legacy" Well they are. At least not "regular" anymore.
running_under_virtualenv
5ded5474ac9b323496506e6391e8d8c2c888d7f1
pip
virtualenv.py
8
3
https://github.com/pypa/pip.git
2
15
0
8
29
Python
{ "docstring": "True if we're running inside a virtual environment, False otherwise.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def running_under_virtualenv() -> bool:
    return _running_under_venv() or _running_under_legacy_virtualenv()
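A sketch of the two checks such helpers typically perform (stdlib only; pip's private helpers are not imported here):

import sys

def running_under_venv():
    # PEP 405 venvs: sys.base_prefix differs from sys.prefix.
    return sys.prefix != getattr(sys, "base_prefix", sys.prefix)

def running_under_legacy_virtualenv():
    # virtualenv < 20 instead patched a sys.real_prefix attribute in.
    return hasattr(sys, "real_prefix")

print(running_under_venv() or running_under_legacy_virtualenv())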
70,351
244,362
398
mmdet/models/dense_heads/base_dense_head.py
97
23
def forward_train(self, x, data_samples, proposal_cfg=None, **kwargs): img_metas = [data_sample['meta'] for data_sample in data_samples] outs = self(x) gt_bboxes = [ data_sample.gt_instances.bboxes for data_sample in data_samples ] if hasattr(data_samples[0].gt_instances, 'labels'): gt_labels = [ data_sample.gt_instances.labels
Simplify api of one-stage detector
forward_train
9c5b3331ac8edbfa328922fbab45c382380da540
mmdetection
base_dense_head.py
12
30
https://github.com/open-mmlab/mmdetection.git
9
178
0
51
277
Python
{ "docstring": "\n Args:\n x (list[Tensor]): Features from FPN.\n data_samples (list[:obj:`GeneralData`]): Each item contains\n the meta information of each image and corresponding\n annotations.\n proposal_cfg (mmcv.Config): Test / postprocessing configuration,\n if None, test_cfg would be used\n\n Returns:\n tuple or Tensor: When `proposal_cfg` is None, the detector is a \\\n normal one-stage detector, The return value is the losses.\n\n - losses: (dict[str, Tensor]): A dictionary of loss components.\n\n When the `proposal_cfg` is not None, the head is used as a\n `rpn_head`, the return value is a tuple contains:\n\n - losses: (dict[str, Tensor]): A dictionary of loss components.\n - results_list (list[:obj:`InstanceData`]): Detection\n results of each image after the post process.\n Each item usually contains following keys.\n\n - scores (Tensor): Classification scores, has a shape\n (num_instance,)\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances,).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n ", "language": "en", "n_whitespaces": 446, "n_words": 147, "vocab_size": 95 }
def forward_train(self, x, data_samples, proposal_cfg=None, **kwargs):
    img_metas = [data_sample['meta'] for data_sample in data_samples]
    outs = self(x)
    gt_bboxes = [
        data_sample.gt_instances.bboxes for data_sample in data_samples
    ]
    if hasattr(data_samples[0].gt_instances, 'labels'):
        gt_labels = [
            data_sample.gt_instances.labels for data_sample in data_samples
        ]
    else:
        # RPN
        gt_labels = None
    if hasattr(data_samples[0], 'instances_ignore'):
        gt_bboxes_ignore = [
            data_sample.ignored_instances.bboxes for data_sample in data_samples
        ]
    else:
        gt_bboxes_ignore = None
    if gt_labels is None:
        loss_inputs = outs + (gt_bboxes, img_metas)
    else:
        loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
    losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
    if proposal_cfg is None:
        return losses
    else:
        results_list = self.get_results(
            *outs, img_metas=img_metas, cfg=proposal_cfg)
        return losses, results_list
78,277
266,040
54
netbox/netbox/models/features.py
11
7
def cf(self): return { cf.name: cf.deserialize(self.custom_field_data.get(cf.name
Closes #10052: The cf attribute now returns deserialized custom field data
cf
ea6d86e6c4bb6037465410db6205a7471bc81a6c
netbox
features.py
12
5
https://github.com/netbox-community/netbox.git
2
34
0
11
55
Python
{ "docstring": "\n Return a dictionary mapping each custom field for this instance to its deserialized value.\n\n ```python\n >>> tenant = Tenant.objects.first()\n >>> tenant.cf\n {'primary_site': <Site: DM-NYC>, 'cust_id': 'DMI01', 'is_active': True}\n ```\n ", "language": "en", "n_whitespaces": 79, "n_words": 29, "vocab_size": 28 }
def cf(self):
    return {
        cf.name: cf.deserialize(self.custom_field_data.get(cf.name))
        for cf in self.custom_fields
    }
42,292
177,153
30
networkx/drawing/tests/test_layout.py
9
10
def test_arf_layout_negative_a_check(self): G = self.Gs pytest.raises(ValueError, nx.arf_layout, G=G, a=-
Arf layout (#5910) * added arf_layout * reference to docstring and comparison to spring layout * rebase to origin main * black re-format * Left aligned docstring text * Cleaned up computation and update variables to new docstring * Updated naming tests. Added input check on arf_layout parameter `a` * Fixed Linter issues for py38 target * Fixed Linter issues for target p38 * linter issue fixed
test_arf_layout_negative_a_check
88245f69f89dbee75cef67bdf35bbfb986a42d52
networkx
test_layout.py
9
3
https://github.com/networkx/networkx.git
1
30
0
9
48
Python
{ "docstring": "\n Checks input parameters correctly raises errors. For example, `a` should be larger than 1\n ", "language": "en", "n_whitespaces": 30, "n_words": 14, "vocab_size": 14 }
def test_arf_layout_negative_a_check(self):
    G = self.Gs
    pytest.raises(ValueError, nx.arf_layout, G=G, a=-1)
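The positive path of the same API, for contrast (assumes a networkx release that ships `arf_layout`, i.e. one containing this PR):

import networkx as nx

G = nx.grid_2d_graph(4, 4)
pos = nx.arf_layout(G, a=1.1)            # a must exceed 1; a <= 1 raises ValueError
print(len(pos) == G.number_of_nodes())   # True: one coordinate per node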
91,386
292,291
18
tests/components/device_tracker/test_config_entry.py
9
5
async def test_connected_device_registered(hass): registry = mock_registry(hass) dispatches = []
Ensure dhcp can still discover new devices from device trackers (#66822) Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
test_connected_device_registered
a18d4c51ff3ab9afd13ee08fe8c65e2f9b77f3b1
core
test_config_entry.py
8
50
https://github.com/home-assistant/core.git
1
204
0
8
31
Python
{ "docstring": "Test dispatch on connected device being registered.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
async def test_connected_device_registered(hass):
    registry = mock_registry(hass)
    dispatches = []
@RunIf(min_gpus=2, skip_windows=True, fairscale=True)
69,605
241,580
72
tests/strategies/test_sharded_strategy.py
39
28
def test_ddp_sharded_strategy_checkpoint_multi_gpu(tmpdir): model = BoringModel() trainer = Trainer(gpus=2, strategy="ddp_sharded_spawn", fast_dev_run=True) trainer.fit(model) checkpoint_path = os.path.join(tmpdir, "model.pt") trainer.save_checkpoint(checkpoint_path)
Rename training plugin test files & names to strategy (#11303)
test_ddp_sharded_strategy_checkpoint_multi_gpu
650c710efacd633fa283955145342bb64063c883
lightning
test_sharded_strategy.py
12
9
https://github.com/Lightning-AI/lightning.git
2
93
1
35
177
Python
{ "docstring": "Test to ensure that checkpoint is saved correctly when using multiple GPUs.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def test_ddp_sharded_strategy_checkpoint_multi_gpu(tmpdir):
    model = BoringModel()
    trainer = Trainer(gpus=2, strategy="ddp_sharded_spawn", fast_dev_run=True)
    trainer.fit(model)

    checkpoint_path = os.path.join(tmpdir, "model.pt")
    trainer.save_checkpoint(checkpoint_path)
    saved_model = BoringModel.load_from_checkpoint(checkpoint_path)

    # Assert model parameters are identical after loading
    for ddp_param, shard_param in zip(model.parameters(), saved_model.parameters()):
        assert torch.equal(ddp_param.to("cpu"), shard_param)

@RunIf(min_gpus=2, skip_windows=True, fairscale=True)
29,408
130,870
522
python/ray/serve/controller.py
85
38
def autoscale(self) -> None: for deployment_name, ( deployment_info, route_prefix, ) in self.list_deployments().items(): deployment_config = deployment_info.deployment_config autoscaling_policy = depl
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
autoscale
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
controller.py
15
36
https://github.com/ray-project/ray.git
6
180
0
56
284
Python
{ "docstring": "Updates autoscaling deployments with calculated num_replicas.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
def autoscale(self) -> None:
    for deployment_name, (
        deployment_info,
        route_prefix,
    ) in self.list_deployments().items():
        deployment_config = deployment_info.deployment_config
        autoscaling_policy = deployment_info.autoscaling_policy

        if autoscaling_policy is None:
            continue

        replicas = self.deployment_state_manager._deployment_states[
            deployment_name
        ]._replicas
        running_replicas = replicas.get([ReplicaState.RUNNING])

        current_num_ongoing_requests = []
        for replica in running_replicas:
            replica_tag = replica.replica_tag
            num_ongoing_requests = self.autoscaling_metrics_store.window_average(
                replica_tag,
                time.time() - autoscaling_policy.config.look_back_period_s,
            )
            if num_ongoing_requests is not None:
                current_num_ongoing_requests.append(num_ongoing_requests)

        if len(current_num_ongoing_requests) == 0:
            continue

        new_deployment_config = deployment_config.copy()
        decision_num_replicas = autoscaling_policy.get_decision_num_replicas(
            current_num_ongoing_requests=current_num_ongoing_requests,
            curr_target_num_replicas=deployment_config.num_replicas,
        )
        new_deployment_config.num_replicas = decision_num_replicas

        new_deployment_info = copy(deployment_info)
        new_deployment_info.deployment_config = new_deployment_config

        goal_id, updating = self.deployment_state_manager.deploy(
            deployment_name, new_deployment_info
        )
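The core sizing decision can be sketched independently of Serve's internals (hypothetical numbers and target; the real policy also applies smoothing delays and min/max replica bounds):

import math

def decision_num_replicas_sketch(current_num_ongoing_requests, target_per_replica=2.0):
    # Size the fleet so average in-flight requests per replica nears the target.
    total = sum(current_num_ongoing_requests)
    return max(1, math.ceil(total / target_per_replica))

print(decision_num_replicas_sketch([3, 5, 4]))  # 12 in-flight requests -> 6 replicas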
52,855
210,095
18
ppdet/utils/checkpoint.py
9
7
def match_state_dict(model_state_dict, weight_state_dict):
Add PP-YOLOv3 code (#5281) * [ppyolov3] add ppyolov3 base code * add ppyolov3 s/m/x * modify ema * modify code to convert onnx successfully * support arbitrary shape * update config to use amp default * refine ppyolo_head code * modify reparameter code * refine act layer * adapter pico_head and tood_head code * remove ppyolov3 yaml * fix codestyle Co-authored-by: wangxinxin08 <wangxinxin08@baidu.com>
match_state_dict
ef83ab8a3f7814e9886a7a22c8dcc55f506b6081
PaddleDetection
checkpoint.py
10
46
https://github.com/PaddlePaddle/PaddleDetection.git
11
305
0
8
49
Python
{ "docstring": "\n Match between the model state dict and pretrained weight state dict.\n Return the matched state dict.\n\n The method supposes that all the names in pretrained weight state dict are\n subclass of the names in models`, if the prefix 'backbone.' in pretrained weight\n keys is stripped. And we could get the candidates for each model key. Then we\n select the name with the longest matched size as the final match result. For\n example, the model state dict has the name of\n 'backbone.res2.res2a.branch2a.conv.weight' and the pretrained weight as\n name of 'res2.res2a.branch2a.conv.weight' and 'branch2a.conv.weight'. We\n match the 'res2.res2a.branch2a.conv.weight' to the model key.\n ", "language": "en", "n_whitespaces": 133, "n_words": 99, "vocab_size": 55 }
def match_state_dict(model_state_dict, weight_state_dict):
    model_keys = sorted(model_state_dict.keys())
    weight_keys = sorted(weight_state_dict.keys())
41,908
176,447
200
networkx/algorithms/approximation/connectivity.py
74
23
def local_node_connectivity(G, source, target, cutoff=None): if target == source: raise nx.NetworkXError("source and target have to be different nodes.") # Maximum possible node independent paths if G.is_directed(): possible = min(G.out_degree(source), G.in_degree(target)) else: possible = min(G.degree(source), G.degree(target)) K = 0 if not possible: return K if cutoff is None: cutoff = float("inf") exclude = set() for i in range(min(possible, cutoff)): try: path = _bidirectional_shortest_path(G, source, target, exclude)
Minor improvements from general code readthrough (#5414) * Add deprecated directive to reversed docstring. * Add missing dep directives to shpfiles. * Remove defn of INF sentinel. * typo. * str -> comment in forloop. * STY: appropriate casing for var name.
local_node_connectivity
cc1db275efc709cb964ce88abbfa877798d58c10
networkx
connectivity.py
13
21
https://github.com/networkx/networkx.git
7
143
0
56
232
Python
{ "docstring": "Compute node connectivity between source and target.\n\n Pairwise or local node connectivity between two distinct and nonadjacent\n nodes is the minimum number of nodes that must be removed (minimum\n separating cutset) to disconnect them. By Menger's theorem, this is equal\n to the number of node independent paths (paths that share no nodes other\n than source and target). Which is what we compute in this function.\n\n This algorithm is a fast approximation that gives an strict lower\n bound on the actual number of node independent paths between two nodes [1]_.\n It works for both directed and undirected graphs.\n\n Parameters\n ----------\n\n G : NetworkX graph\n\n source : node\n Starting node for node connectivity\n\n target : node\n Ending node for node connectivity\n\n cutoff : integer\n Maximum node connectivity to consider. If None, the minimum degree\n of source or target is used as a cutoff. Default value None.\n\n Returns\n -------\n k: integer\n pairwise node connectivity\n\n Examples\n --------\n >>> # Platonic octahedral graph has node connectivity 4\n >>> # for each non adjacent node pair\n >>> from networkx.algorithms import approximation as approx\n >>> G = nx.octahedral_graph()\n >>> approx.local_node_connectivity(G, 0, 5)\n 4\n\n Notes\n -----\n This algorithm [1]_ finds node independents paths between two nodes by\n computing their shortest path using BFS, marking the nodes of the path\n found as 'used' and then searching other shortest paths excluding the\n nodes marked as used until no more paths exist. It is not exact because\n a shortest path could use nodes that, if the path were longer, may belong\n to two different node independent paths. Thus it only guarantees an\n strict lower bound on node connectivity.\n\n Note that the authors propose a further refinement, losing accuracy and\n gaining speed, which is not implemented yet.\n\n See also\n --------\n all_pairs_node_connectivity\n node_connectivity\n\n References\n ----------\n .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for\n Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035\n http://eclectic.ss.uci.edu/~drwhite/working.pdf\n\n ", "language": "en", "n_whitespaces": 494, "n_words": 314, "vocab_size": 192 }
def local_node_connectivity(G, source, target, cutoff=None):
    if target == source:
        raise nx.NetworkXError("source and target have to be different nodes.")

    # Maximum possible node independent paths
    if G.is_directed():
        possible = min(G.out_degree(source), G.in_degree(target))
    else:
        possible = min(G.degree(source), G.degree(target))

    K = 0
    if not possible:
        return K

    if cutoff is None:
        cutoff = float("inf")

    exclude = set()
    for i in range(min(possible, cutoff)):
        try:
            path = _bidirectional_shortest_path(G, source, target, exclude)
            exclude.update(set(path))
            K += 1
        except nx.NetworkXNoPath:
            break

    return K
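Another quick check beyond the docstring's octahedral example (Petersen graph; nodes 0 and 2 are nonadjacent and the graph's true connectivity is 3):

import networkx as nx
from networkx.algorithms import approximation as approx

G = nx.petersen_graph()
# Returns a fast lower bound on the number of node-independent paths;
# here it typically recovers the exact value, 3.
print(approx.local_node_connectivity(G, 0, 2))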
35,618
153,802
1,145
modin/core/dataframe/pandas/dataframe/dataframe.py
304
48
def _copartition(self, axis, other, how, sort, force_repartition=False): if isinstance(other, type(self)): other = [other] self_index = self.axes[axis] others_index = [o.axes[axis] for o in other] joined_index, make_reindexer = self._join_index_objects( axis, [self_index] + others_index, how, sort ) frames = [self] + other non_empty_frames_idx = [ i for i, o in enumerate(frames) if o._partitions.size != 0 ] # If all frames are empty if len(non_empty_frames_idx) == 0: return ( self._partitions, [o._partitions for o in other], joined_index, # There are no partition sizes because the resulting dataframe # has no partitions. [], ) base_frame_idx = non_empty_frames_idx[0] other_frames = frames[base_frame_idx + 1 :] # Picking first non-empty frame base_frame = frames[non_empty_frames_idx[0]] base_index = base_frame.axes[axis] # define conditions for reindexing and repartitioning `self` frame do_reindex_base = not base_index.equals(joined_index) do_repartition_base = force_repartition or do_reindex_base # Perform repartitioning and reindexing for `base_frame` if needed. # Also define length of base and fra
PERF-#4493: Use partition size caches more in Modin dataframe. (#4495) Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com> Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: mvashishtha <mahesh@ponder.io>
_copartition
cca9468648521e9317de1cb69cf8e6b1d5292d21
modin
dataframe.py
16
68
https://github.com/modin-project/modin.git
21
462
0
163
694
Python
{ "docstring": "\n Copartition two Modin DataFrames.\n\n Perform aligning of partitions, index and partition blocks.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to copartition along (0 - rows, 1 - columns).\n other : PandasDataframe\n Other Modin DataFrame(s) to copartition against.\n how : str\n How to manage joining the index object (\"left\", \"right\", etc.).\n sort : bool\n Whether sort the joined index or not.\n force_repartition : bool, default: False\n Whether force the repartitioning or not. By default,\n this method will skip repartitioning if it is possible. This is because\n reindexing is extremely inefficient. Because this method is used to\n `join` or `append`, it is vital that the internal indices match.\n\n Returns\n -------\n tuple\n Tuple containing:\n 1) 2-d NumPy array of aligned left partitions\n 2) list of 2-d NumPy arrays of aligned right partitions\n 3) joined index along ``axis``\n 4) List with sizes of partitions along axis that partitioning\n was done on. This list will be empty if and only if all\n the frames are empty.\n ", "language": "en", "n_whitespaces": 448, "n_words": 161, "vocab_size": 111 }
def _copartition(self, axis, other, how, sort, force_repartition=False):
    if isinstance(other, type(self)):
        other = [other]

    self_index = self.axes[axis]
    others_index = [o.axes[axis] for o in other]
    joined_index, make_reindexer = self._join_index_objects(
        axis, [self_index] + others_index, how, sort
    )

    frames = [self] + other
    non_empty_frames_idx = [
        i for i, o in enumerate(frames) if o._partitions.size != 0
    ]

    # If all frames are empty
    if len(non_empty_frames_idx) == 0:
        return (
            self._partitions,
            [o._partitions for o in other],
            joined_index,
            # There are no partition sizes because the resulting dataframe
            # has no partitions.
            [],
        )

    base_frame_idx = non_empty_frames_idx[0]
    other_frames = frames[base_frame_idx + 1 :]

    # Picking first non-empty frame
    base_frame = frames[non_empty_frames_idx[0]]
    base_index = base_frame.axes[axis]

    # define conditions for reindexing and repartitioning `self` frame
    do_reindex_base = not base_index.equals(joined_index)
    do_repartition_base = force_repartition or do_reindex_base

    # Perform repartitioning and reindexing for `base_frame` if needed.
    # Also define length of base and frames. We will need to know the
    # lengths for alignment.
    if do_repartition_base:
        reindexed_base = base_frame._partition_mgr_cls.map_axis_partitions(
            axis,
            base_frame._partitions,
            make_reindexer(do_reindex_base, base_frame_idx),
        )
        if axis:
            base_lengths = [obj.width() for obj in reindexed_base[0]]
        else:
            base_lengths = [obj.length() for obj in reindexed_base.T[0]]
    else:
        reindexed_base = base_frame._partitions
        base_lengths = self._column_widths if axis else self._row_lengths

    others_lengths = [o._axes_lengths[axis] for o in other_frames]

    # define conditions for reindexing and repartitioning `other` frames
    do_reindex_others = [
        not o.axes[axis].equals(joined_index) for o in other_frames
    ]

    do_repartition_others = [None] * len(other_frames)
    for i in range(len(other_frames)):
        do_repartition_others[i] = (
            force_repartition
            or do_reindex_others[i]
            or others_lengths[i] != base_lengths
        )

    # perform repartitioning and reindexing for `other_frames` if needed
    reindexed_other_list = [None] * len(other_frames)
    for i in range(len(other_frames)):
        if do_repartition_others[i]:
            # indices of others frame start from `base_frame_idx` + 1
            reindexed_other_list[i] = other_frames[
                i
            ]._partition_mgr_cls.map_axis_partitions(
                axis,
                other_frames[i]._partitions,
                make_reindexer(do_repartition_others[i], base_frame_idx + 1 + i),
                lengths=base_lengths,
            )
        else:
            reindexed_other_list[i] = other_frames[i]._partitions

    reindexed_frames = (
        [frames[i]._partitions for i in range(base_frame_idx)]
        + [reindexed_base]
        + reindexed_other_list
    )
    return (reindexed_frames[0], reindexed_frames[1:], joined_index, base_lengths)
40,414
169,303
45
pandas/core/indexes/multi.py
18
4
def size(self) -> int: # override Index.size to avoid materializing _values return len(self) # -------------------------------------------
PERF: MultiIndex.size (#48723) * add MultiIndex.size * whatsnew
size
2fbdd1eb4ef73a470f3db60cbf38a7d9f6c3ffe1
pandas
multi.py
7
5
https://github.com/pandas-dev/pandas.git
1
13
0
16
27
Python
{ "docstring": "\n Return the number of elements in the underlying data.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
def size(self) -> int:
    # override Index.size to avoid materializing _values
    return len(self)

# --------------------------------------------------------------------
# Levels Methods
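Quick standalone check of the property:

import pandas as pd

mi = pd.MultiIndex.from_product([["a", "b"], [1, 2, 3]])
print(mi.size, len(mi))  # 6 6 -- size no longer materializes the index values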
35,248
153,079
937
modin/experimental/core/execution/native/implementations/omnisci_on_native/dataframe/dataframe.py
278
49
def groupby_agg(self, by, axis, agg, groupby_args, **kwargs): # Currently we only expect 'by' to be a projection of the same frame. # If 'by' holds a list of columns/series, then we create such projection # to re-use code. if not isinstance(by, DFAlgQueryCompiler): if is_list_like(by): by_cols = [] by_frames = [] for obj in by: if isinstance(obj, str): by_cols.append(obj) elif hasattr(obj, "_modin_frame"): by_frames.append(obj._modin_frame) else: raise NotImplementedError("unsupported groupby args") by_cols = Index.__new__(Index, data=by_cols, dtype=self
REFACTOR-#2656: Update modin to fit algebra (code only) (#3717) Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Co-authored-by: Vasily Litvinov <vasilij.n.litvinov@intel.com> Co-authored-by: Alexey Prutskov <alexey.prutskov@intel.com> Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com> Signed-off-by: Rehan Durrani <rehan@ponder.io>
groupby_agg
58bbcc37477866d19c8b092a0e1974a4f0baa586
modin
dataframe.py
17
105
https://github.com/modin-project/modin.git
34
774
0
161
546
Python
{ "docstring": "\n Groupby with aggregation operation.\n\n Parameters\n ----------\n by : DFAlgQueryCompiler or list-like of str\n Grouping keys.\n axis : {0, 1}\n Only rows groupby is supported, so should be 0.\n agg : str or dict\n Aggregates to compute.\n groupby_args : dict\n Additional groupby args.\n **kwargs : dict\n Keyword args. Currently ignored.\n\n Returns\n -------\n OmnisciOnNativeDataframe\n The new frame.\n ", "language": "en", "n_whitespaces": 206, "n_words": 55, "vocab_size": 45 }
def groupby_agg(self, by, axis, agg, groupby_args, **kwargs):
    # Currently we only expect 'by' to be a projection of the same frame.
    # If 'by' holds a list of columns/series, then we create such projection
    # to re-use code.
    if not isinstance(by, DFAlgQueryCompiler):
        if is_list_like(by):
            by_cols = []
            by_frames = []
            for obj in by:
                if isinstance(obj, str):
                    by_cols.append(obj)
                elif hasattr(obj, "_modin_frame"):
                    by_frames.append(obj._modin_frame)
                else:
                    raise NotImplementedError("unsupported groupby args")
            by_cols = Index.__new__(Index, data=by_cols, dtype=self.columns.dtype)
            by_frame = self.mask(col_labels=by_cols)
            if by_frames:
                by_frame = by_frame.concat(
                    axis=1, other_modin_frames=by_frames, ignore_index=True
                )
        else:
            raise NotImplementedError("unsupported groupby args")
    else:
        by_frame = by._modin_frame

    if axis != 0:
        raise NotImplementedError("groupby is supported for axis = 0 only")

    base = by_frame._find_common_projections_base(self)
    if base is None:
        raise NotImplementedError("unsupported groupby args")

    if groupby_args["level"] is not None:
        raise NotImplementedError("levels are not supported for groupby")

    drop = kwargs.get("drop", True)
    as_index = groupby_args.get("as_index", True)
    groupby_cols = by_frame.columns

    if isinstance(agg, dict):
        agg_cols = agg.keys()
    elif not drop:
        # If 'by' data came from a different frame then 'self-aggregation'
        # columns are more prioritized.
        agg_cols = self.columns
    else:
        agg_cols = [col for col in self.columns if col not in groupby_cols]

    # Mimic pandas behaviour: pandas does not allow for aggregation to be empty
    # in case of multi-column 'by'.
    if not as_index and len(agg_cols) == 0 and len(groupby_cols) > 1:
        agg_cols = self.columns

    # Create new base where all required columns are computed. We don't allow
    # complex expressions to be a group key or an aggregate operand.
    allowed_nodes = (FrameNode, TransformNode)
    if not isinstance(by_frame._op, allowed_nodes):
        raise NotImplementedError(
            "OmniSci doesn't allow complex expression to be a group key. "
            f"The only allowed frame nodes are: {tuple(o.__name__ for o in allowed_nodes)}, "
            f"met '{type(by_frame._op).__name__}'."
        )

    col_to_delete_template = "__delete_me_{name}"
18,931
92,539
214
src/sentry/snuba/tasks.py
48
29
def delete_subscription_from_snuba(query_subscription_id, **kwargs): try: subscription = QuerySubscription.objects.get(id=query_subscription_id) except QuerySubscription.DoesNotExist: metrics.incr("snuba.subscriptions.delete.subscription_does_not_exist") return if subscription.status not in [ QuerySubscription.Status.DELETING.value, QuerySubscription.Status.DISABLED.value, ]: metrics.incr("snuba.subscriptions.delete.incorrect_status") return if subscription.subscription_id is not None: query_dataset = QueryDatasets(subscription.snuba_query.dataset) entity_key = get_entity_key_from_snuba_query( subscription.snuba_query, subscription.project.organization_id, subscription.project_id ) _delete_from_s
feat(mep): Restructure how we determine entity subscription for alerts (#36605) Previously we mapped a specific `EntityKey` to all `EntitySubscription` classes. As part of introducing metric based performance alerts, we want to have the `EntitySubscription` determine the specific entity that the subscription will run on. This allows us to automatically determine the correct entity for metric based alerts without having to duplicate logic that parses aggregates/datasets/etc.
delete_subscription_from_snuba
06885ee7284a274d02a9dc1f6a0348c8edc07184
sentry
tasks.py
12
26
https://github.com/getsentry/sentry.git
5
142
0
40
227
Python
{ "docstring": "\n Task to delete a corresponding subscription in Snuba from a `QuerySubscription` in\n Sentry.\n If the local subscription is marked for deletion (as opposed to disabled),\n then we delete the local subscription once we've successfully removed from Snuba.\n ", "language": "en", "n_whitespaces": 53, "n_words": 37, "vocab_size": 28 }
def delete_subscription_from_snuba(query_subscription_id, **kwargs):
    try:
        subscription = QuerySubscription.objects.get(id=query_subscription_id)
    except QuerySubscription.DoesNotExist:
        metrics.incr("snuba.subscriptions.delete.subscription_does_not_exist")
        return

    if subscription.status not in [
        QuerySubscription.Status.DELETING.value,
        QuerySubscription.Status.DISABLED.value,
    ]:
        metrics.incr("snuba.subscriptions.delete.incorrect_status")
        return

    if subscription.subscription_id is not None:
        query_dataset = QueryDatasets(subscription.snuba_query.dataset)
        entity_key = get_entity_key_from_snuba_query(
            subscription.snuba_query, subscription.project.organization_id, subscription.project_id
        )
        _delete_from_snuba(
            query_dataset,
            subscription.subscription_id,
            entity_key,
        )

    if subscription.status == QuerySubscription.Status.DELETING.value:
        subscription.delete()
    else:
        subscription.update(subscription_id=None)
@pytest.mark.parametrize('count, expected', [(1, 100), (3, 300), (5, 500), (7, 500)])
117,443
320,931
89
tests/unit/mainwindow/test_messageview.py
26
18
def test_changing_timer_with_messages_shown(qtbot, view, config_stub): config_stub.val.messages.timeout = 900000 # 15s view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test')) with qtbot.wait_signal(view._clear_timer.timeout): config_stub.val.messages.timeout = 100 @pytest.mark.parametrize('count, expected', [(1, 100), (3, 300), (5, 500), (7, 500)])
Add a MessageInfo data class Preparation for #7246
test_changing_timer_with_messages_shown
5616a99eff34f7074641d1391ed77d6b4b743529
qutebrowser
test_messageview.py
11
5
https://github.com/qutebrowser/qutebrowser.git
1
57
1
24
143
Python
{ "docstring": "When we change messages.timeout, the timer should be restarted.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_changing_timer_with_messages_shown(qtbot, view, config_stub):
    config_stub.val.messages.timeout = 900000  # 15s
    view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))
    with qtbot.wait_signal(view._clear_timer.timeout):
        config_stub.val.messages.timeout = 100

@pytest.mark.parametrize('count, expected', [(1, 100), (3, 300), (5, 500), (7, 500)])
14,108
66,132
62
erpnext/hr/doctype/interview/interview.py
96
31
def get_events(start, end, filters=None): from frappe.desk.calendar import get_event_conditions events = [] event_color = { "Pending": "#fff4f0", "Under Review": "#d3e8fc", "Cleared": "#eaf5ed", "Rejected": "#fce7e7", } conditions = get_event_conditions("Interview", filters) interviews = frappe.db.sql( .format( conditions=conditions ), {"start": start, "end": end}, as_dict=True, update={"allDay": 0}, ) for d in interviews: subject_data = [] for field in ["name", "job_applicant", "interview_round"]: if not d.get(field): continue subject_data.append(d.get(field)) color = event_color.get(d.status) interview_data = { "from": get_datetime("%s %s" % (d.scheduled_on, d.from_time or "00:00:00")), "to": get_datetime("%s %s" % (d.scheduled_on, d.to_time or "00:00:00")), "name": d.name, "subject": "
style: format code with black
get_events
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
interview.py
16
45
https://github.com/frappe/erpnext.git
7
216
0
75
373
Python
{ "docstring": "Returns events for Gantt / Calendar view rendering.\n\n\t:param start: Start date-time.\n\t:param end: End date-time.\n\t:param filters: Filters (JSON).\n\t\n\t\t\tSELECT DISTINCT\n\t\t\t\t`tabInterview`.name, `tabInterview`.job_applicant, `tabInterview`.interview_round,\n\t\t\t\t`tabInterview`.scheduled_on, `tabInterview`.status, `tabInterview`.from_time as from_time,\n\t\t\t\t`tabInterview`.to_time as to_time\n\t\t\tfrom\n\t\t\t\t`tabInterview`\n\t\t\twhere\n\t\t\t\t(`tabInterview`.scheduled_on between %(start)s and %(end)s)\n\t\t\t\tand docstatus != 2\n\t\t\t\t{conditions}\n\t\t\t", "language": "en", "n_whitespaces": 32, "n_words": 46, "vocab_size": 41 }
def get_events(start, end, filters=None):
    from frappe.desk.calendar import get_event_conditions

    events = []
    event_color = {
        "Pending": "#fff4f0",
        "Under Review": "#d3e8fc",
        "Cleared": "#eaf5ed",
        "Rejected": "#fce7e7",
    }
    conditions = get_event_conditions("Interview", filters)
    interviews = frappe.db.sql(
        .format(conditions=conditions),
        {"start": start, "end": end},
        as_dict=True,
        update={"allDay": 0},
    )
    for d in interviews:
        subject_data = []
        for field in ["name", "job_applicant", "interview_round"]:
            if not d.get(field):
                continue
            subject_data.append(d.get(field))

        color = event_color.get(d.status)
        interview_data = {
            "from": get_datetime("%s %s" % (d.scheduled_on, d.from_time or "00:00:00")),
            "to": get_datetime("%s %s" % (d.scheduled_on, d.to_time or "00:00:00")),
            "name": d.name,
            "subject": "\n".join(subject_data),
            "color": color if color else "#89bcde",
        }
        events.append(interview_data)
    return events
31,364
138,228
263
python/ray/tune/tests/test_experiment.py
33
12
def testFuncTrainableCheckpointConfigValidation(self): with self.assertRaises(ValueError): Experiment( name="foo", run="f1", # Will point to a wrapped function trainable checkpoint_c
[Tune] Fix CheckpointConfig validation for function trainables (#31255) This fixes an issue where a ValueError wasn't being properly raised when passing in a function trainable and setting `checkpoint_at_end=True` or `checkpoint_frequency > 0`. Previously, the error was only raised for function trainables of the form `def train_func(config, checkpoint_dir):`, which is the old checkpoint dir function API. Signed-off-by: Justin Yu <justinvyu@berkeley.edu>
testFuncTrainableCheckpointConfigValidation
51b56ad0118ed3f4341410e8c75625d1ca8cd757
ray
test_experiment.py
13
19
https://github.com/ray-project/ray.git
1
93
0
21
161
Python
{ "docstring": "Raise an error when trying to specify checkpoint_at_end/checkpoint_frequency\n with a function trainable.", "language": "en", "n_whitespaces": 18, "n_words": 12, "vocab_size": 12 }
def testFuncTrainableCheckpointConfigValidation(self):
    with self.assertRaises(ValueError):
        Experiment(
            name="foo",
            run="f1",  # Will point to a wrapped function trainable
            checkpoint_config=CheckpointConfig(checkpoint_at_end=True),
        )
    with self.assertRaises(ValueError):
        Experiment(
            name="foo",
            run="f1",
            checkpoint_config=CheckpointConfig(checkpoint_frequency=1),
        )
    with self.assertRaises(ValueError):
        Experiment(
            name="foo",
            run=lambda config: 1,
            checkpoint_config=CheckpointConfig(checkpoint_at_end=True),
        )
1,631
9,551
220
reconstruction/ostec/utils/align2stylegan.py
102
23
def create_perspective_transform(src, dst, round=False, splat_args=False): try: transform_matrix = create_perspective_transform_matrix(src, dst) error = None except np.linalg.LinAlgError as e: transform_matrix = np.identity(3, dtype=np.float) error = "invalid input quads (%s and %s): %s" %(src, dst, e) error = error.replace("\n", "") to_eval = "def perspective_transform(%s):\n" %( splat_args and "*pt" or "pt", ) to_eval += " res = np.dot(transform_matrix, ((pt[0], ), (pt[1], ), (1, )))\n" to_eval += " res = res / res[2]\n" if round: to_eval += " return (int(round(r
initialize ostec
create_perspective_transform
7375ee364e0df2a417f92593e09557f1b2a3575a
insightface
align2stylegan.py
13
26
https://github.com/deepinsight/insightface.git
5
144
0
67
254
Python
{ "docstring": " Returns a function which will transform points in quadrilateral\n ``src`` to the corresponding points on quadrilateral ``dst``::\n\n >>> transform = create_perspective_transform(\n ... [(0, 0), (10, 0), (10, 10), (0, 10)],\n ... [(50, 50), (100, 50), (100, 100), (50, 100)],\n ... )\n >>> transform((5, 5))\n (74.99999999999639, 74.999999999999957)\n\n If ``round`` is ``True`` then points will be rounded to the nearest\n integer and integer values will be returned.\n\n >>> transform = create_perspective_transform(\n ... [(0, 0), (10, 0), (10, 10), (0, 10)],\n ... [(50, 50), (100, 50), (100, 100), (50, 100)],\n ... round=True,\n ... )\n >>> transform((5, 5))\n (75, 75)\n\n If ``splat_args`` is ``True`` the function will accept two arguments\n instead of a tuple.\n\n >>> transform = create_perspective_transform(\n ... [(0, 0), (10, 0), (10, 10), (0, 10)],\n ... [(50, 50), (100, 50), (100, 100), (50, 100)],\n ... splat_args=True,\n ... )\n >>> transform(5, 5)\n (74.99999999999639, 74.999999999999957)\n\n If the input values yield an invalid transformation matrix an identity\n function will be returned and the ``error`` attribute will be set to a\n description of the error::\n\n >>> tranform = create_perspective_transform(\n ... np.zeros((4, 2)),\n ... np.zeros((4, 2)),\n ... )\n >>> transform((5, 5))\n (5.0, 5.0)\n >>> transform.error\n 'invalid input quads (...): Singular matrix\n ", "language": "en", "n_whitespaces": 606, "n_words": 194, "vocab_size": 84 }
def create_perspective_transform(src, dst, round=False, splat_args=False):
    try:
        transform_matrix = create_perspective_transform_matrix(src, dst)
        error = None
    except np.linalg.LinAlgError as e:
        transform_matrix = np.identity(3, dtype=np.float)
        error = "invalid input quads (%s and %s): %s" % (src, dst, e)
        error = error.replace("\n", "")

    to_eval = "def perspective_transform(%s):\n" % (
        splat_args and "*pt" or "pt",
    )
    to_eval += "    res = np.dot(transform_matrix, ((pt[0], ), (pt[1], ), (1, )))\n"
    to_eval += "    res = res / res[2]\n"
    if round:
        to_eval += "    return (int(round(res[0][0])), int(round(res[1][0])))\n"
    else:
        to_eval += "    return (res[0][0], res[1][0])\n"
    locals = {
        "transform_matrix": transform_matrix,
    }
    locals.update(globals())
    exec(to_eval, locals, locals)
    res = locals["perspective_transform"]
    res.matrix = transform_matrix
    res.error = error
    return res
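The `create_perspective_transform_matrix` helper referenced above is not shown in this cut. A common construction (a sketch under that assumption, not necessarily this repo's exact code) solves the standard eight-unknown homography system:

import numpy as np

def perspective_transform_matrix_sketch(src, dst):
    # Build A.h = b from the four point correspondences; the ninth matrix
    # entry is fixed to 1 by convention.
    A, b = [], []
    for (x, y), (u, v) in zip(src, dst):
        A.append([x, y, 1, 0, 0, 0, -u * x, -u * y])
        A.append([0, 0, 0, x, y, 1, -v * x, -v * y])
        b.extend([u, v])
    h = np.linalg.solve(np.array(A, dtype=float), np.array(b, dtype=float))
    return np.append(h, 1).reshape(3, 3)

M = perspective_transform_matrix_sketch(
    [(0, 0), (10, 0), (10, 10), (0, 10)],
    [(50, 50), (100, 50), (100, 100), (50, 100)],
)
pt = M @ np.array([5, 5, 1.0])
print(pt[:2] / pt[2])  # ~ (75, 75), matching the docstring example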
117,539
321,111
111
qutebrowser/browser/qtnetworkdownloads.py
29
19
def get(self, url, cache=True, **kwargs): if not url.isValid(): urlutils.invalid_url_error(url, "start download") return None req = QNetworkRequest(url) user_agent = websettings.user_agent(url) req.setHeader(QNetworkRequest.KnownHeaders.UserAgentHeader, user_agent) if not cache: req.setAttribute(QNetworkRequest.Attribute.CacheSaveControlAttribute, False) return self.get_request(req, **kw
Run scripts/dev/rewrite_enums.py
get
0877fb0d78635692e481c8bde224fac5ad0dd430
qutebrowser
qtnetworkdownloads.py
11
10
https://github.com/qutebrowser/qutebrowser.git
3
85
0
25
136
Python
{ "docstring": "Start a download with a link URL.\n\n Args:\n url: The URL to get, as QUrl\n cache: If set to False, don't cache the response.\n **kwargs: passed to get_request().\n\n Return:\n The created DownloadItem.\n ", "language": "en", "n_whitespaces": 97, "n_words": 32, "vocab_size": 28 }
def get(self, url, cache=True, **kwargs):
    if not url.isValid():
        urlutils.invalid_url_error(url, "start download")
        return None

    req = QNetworkRequest(url)
    user_agent = websettings.user_agent(url)
    req.setHeader(QNetworkRequest.KnownHeaders.UserAgentHeader, user_agent)

    if not cache:
        req.setAttribute(QNetworkRequest.Attribute.CacheSaveControlAttribute, False)

    return self.get_request(req, **kwargs)
71,160
246,332
518
tests/federation/test_federation_server.py
106
31
def test_send_join_partial_state(self): joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME join_result = self._make_join(joining_user) join_event_dict = join_result["event"] add_hashes_and_signatures( KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], join_event_dict, signature_name=self.OTHER_SERVER_NAME, signing_key=self.OTHER_SERVER_SIGNATURE_KEY, ) channel = self.make_signed_federation_request( "PUT", f"/_matrix/federation/v2/send_join/{self._room_id}/x?org.matrix.msc3706.partial_state=true", content=join_event_dict, ) self.assertEquals(channel.code, 200, channel.json_body) # expect a reduced room state returned_state = [ (ev["type"], ev["state_key"]) for ev in channel.json_body["state"] ] self.assertCountEqual( returned_state, [ ("m.room.create", ""), ("m.room.power_levels", ""), ("m.room.join_rules", ""), ("m.room.history_visibility", ""), ], ) # the auth chain should not include anything already in "state" returned_auth_chain_events = [ (ev["type"], ev["state_key"]) for ev in channel.json_body["auth_chain"] ] self.assertCountEqual( returned_auth
Implement MSC3706: partial state in `/send_join` response (#11967) * Make `get_auth_chain_ids` return a Set It has a set internally, and a set is often useful where it gets used, so let's avoid converting to an intermediate list. * Minor refactors in `on_send_join_request` A little bit of non-functional groundwork * Implement MSC3706: partial state in /send_join response
test_send_join_partial_state
63c46349c41aa967e64a5a4042ef5177f934be47
synapse
test_federation_server.py
13
41
https://github.com/matrix-org/synapse.git
3
215
0
74
360
Python
{ "docstring": "When MSC3706 support is enabled, /send_join should return partial state", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def test_send_join_partial_state(self): joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME join_result = self._make_join(joining_user) join_event_dict = join_result["event"] add_hashes_and_signatures( KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], join_event_dict, signature_name=self.OTHER_SERVER_NAME, signing_key=self.OTHER_SERVER_SIGNATURE_KEY, ) channel = self.make_signed_federation_request( "PUT", f"/_matrix/federation/v2/send_join/{self._room_id}/x?org.matrix.msc3706.partial_state=true", content=join_event_dict, ) self.assertEquals(channel.code, 200, channel.json_body) # expect a reduced room state returned_state = [ (ev["type"], ev["state_key"]) for ev in channel.json_body["state"] ] self.assertCountEqual( returned_state, [ ("m.room.create", ""), ("m.room.power_levels", ""), ("m.room.join_rules", ""), ("m.room.history_visibility", ""), ], ) # the auth chain should not include anything already in "state" returned_auth_chain_events = [ (ev["type"], ev["state_key"]) for ev in channel.json_body["auth_chain"] ] self.assertCountEqual( returned_auth_chain_events, [ ("m.room.member", "@kermit:test"), ], ) # the room should show that the new user is a member r = self.get_success( self.hs.get_state_handler().get_current_state(self._room_id) ) self.assertEqual(r[("m.room.member", joining_user)].membership, "join")
56,111
220,753
267
python3.10.4/Lib/asyncio/streams.py
87
10
async def drain(self):
    if self._reader is not None:
        exc = self._reader.exception()
        if exc is not None:
            raise exc
    if self._transport.is_closing():
        # Wait for protocol.connection_lost() call
        # Raise connection closing error if any,
        # ConnectionResetError otherwise
        # Yield to the event loop so connection_lost() may be
        # called.  Without this, _drain_helper() would return
        # immediately, and code that calls
        #     write(...); await drain()
        # in a loop would never call connection_lost(), so it
        # would not see an error when the socket is closed.
        await sleep(0)
    await self._protocol._drain_helper()
add python 3.10.4 for windows
drain
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
streams.py
11
8
https://github.com/XX-net/XX-Net.git
4
53
0
60
100
Python
{ "docstring": "Flush the write buffer.\n\n The intended use is to write\n\n w.write(data)\n await w.drain()\n ", "language": "en", "n_whitespaces": 45, "n_words": 13, "vocab_size": 12 }
async def drain(self): if self._reader is not None: exc = self._reader.exception() if exc is not None: raise exc if self._transport.is_closing(): # Wait for protocol.connection_lost() call # Raise connection closing error if any, # ConnectionResetError otherwise # Yield to the event loop so connection_lost() may be # called. Without this, _drain_helper() would return # immediately, and code that calls # write(...); await drain() # in a loop would never call connection_lost(), so it # would not see an error when the socket is closed. await sleep(0) await self._protocol._drain_helper()
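A usage sketch of the write()/drain() backpressure pattern the docstring describes (host and port are placeholders):

import asyncio

async def send_lines(host, port, lines):
    # write() only buffers; drain() waits until the transport's write
    # buffer has emptied enough to safely continue writing.
    reader, writer = await asyncio.open_connection(host, port)
    for line in lines:
        writer.write(line.encode() + b"\n")
        await writer.drain()
    writer.close()
    await writer.wait_closed()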
52,191
208,066
48
celery/canvas.py
16
9
def on_chord_header_start(self, chord, **header) -> dict:
    if not isinstance(chord.tasks, group):
        chord.tasks = group(chord.tasks)
    return self.on_group_start(chord.tasks, **header)
Canvas Header Stamping (#7384)

* Strip down the header-stamping PR to the basics.
* Serialize groups.
* Add groups to result backend meta data.
* Fix spelling mistake.
* Revert changes to canvas.py
* Revert changes to app/base.py
* Add stamping implementation to canvas.py
* Send task to AMQP with groups.
* Successfully pass single group to result.
* _freeze_gid dict merge fixed
* First draft of the visitor API.
* OptionsVisitor created
* Fixed canvas.py
* Added test for simple test for chord and fixed chord implementation
* Changed _IMMUTABLE_OPTIONS
* Fixed chord interface
* Fixed list order
* Fixed tests (stamp test and chord test), fixed order in groups
* Fixed lint and elements
* Changed implementation of stamp API and fix lint
* Added documentation to Stamping API. Added chord with groups test
* Implemented stamping inside replace and added test for an implementation
* Added additional tests for chord, improved coverage
* Splitted into subtests
* Group stamping rollback
* group.id is None fixed
* Added integration test
* apply_async fixed
* Integration test and test_chord fixed
* Lint fixed
* chord freeze fixed
* Minor fixes.
* Chain apply_async fixed and tests fixed
* Added integration test for chord
* type -> isinstance
* Redo header stamping (#7341)
* Added stamping mechanism
* Manual stamping improved
* flake8 fixed
* Added subtests
* Add comma.
* Fixed chord and added test for that
* Moved groups to stamps
* canvas.py fixed
* Test chord.py fixed
* Fixed stamped_headers
* collections import fixed
* Update celery/backends/base.py
* ampq.py fixed
* Refrain from using deprecated import path.
* Fix test_complex_chain regression. Whenever we stamp a group we need to freeze it first if it wasn't already frozen. Somewhere along the line, the group id changed because we were freezing twice. This commit places the stamping operation after preparing the chain's steps which fixes the problem somehow. We don't know why yet.
* Fixed integration tests
* Fixed issues with maybe_list. Add documentation
* Fixed potential issue with integration tests
* Fixed issues with _regen
* Fixed test_generator issues
* Fixed _regen stamping
* Fixed TimeOut issue
* Update docs/userguide/canvas.rst
* Fixed Couchbase
* Better stamping intro
* New GroupVisitor example
* Adjust documentation.

Co-authored-by: Naomi Elstein <naomi.els@omerkatz.com>
Co-authored-by: Omer Katz <omer.katz@omerkatz.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Asif Saif Uddin <auvipy@gmail.com>
Co-authored-by: Omer Katz <omer.katz@kcg.tech>
on_chord_header_start
1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc
celery
canvas.py
11
12
https://github.com/celery/celery.git
2
46
0
15
73
Python
{ "docstring": "Method that is called on сhord header stamping start.\n\n Arguments:\n chord (chord): chord that is stamped.\n headers (Dict): Partial headers that could be merged with existing headers.\n Returns:\n Dict: headers to update.\n ", "language": "en", "n_whitespaces": 92, "n_words": 32, "vocab_size": 26 }
def on_chord_header_start(self, chord, **header) -> dict: if not isinstance(chord.tasks, group): chord.tasks = group(chord.tasks) return self.on_group_start(chord.tasks, **header)
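A hedged usage sketch: when the header passed to chord() is a plain list of signatures, this normalization coerces it to a group before the header-start stamping runs (task names here are hypothetical):

from celery import chord, signature

header = [signature("tasks.add", args=(i, i)) for i in range(3)]  # list, not group
body = signature("tasks.tsum")
result = chord(header)(body)  # header becomes group(...) when stamping visits it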
34,932
151,043
436
freqtrade/freqai/data_drawer.py
105
17
def load_historic_predictions_from_disk(self):
    exists = self.historic_predictions_path.is_file()
    if exists:
        try:
            with open(self.historic_predictions_path, "rb") as fp:
                self.historic_predictions = cloudpickle.load(fp)
            logger.info(
                f"Found existing historic predictions at {self.full_path}, but beware "
                "that statistics may be inaccurate if the bot has been offline for "
                "an extended period of time."
            )
        except EOFError:
            logger.warning(
                'Historical prediction file was corrupted. Trying to load backup file.')
            with open(self.historic_predictions_bkp_path, "rb") as fp:
                self.historic_predictions = cloudpickle.load(fp)
            logger.warning('FreqAI successfully loaded the backup historical predictions file.')
    elif not self.follow_mode:
        logger.info("Could not find existing historic_predictions, starting from scratch")
    else:
        logger.warning(
            f"Follower could not find historic predictions at {self.full_path} "
            "sending null values back to strategy"
        )
    return exists
backup historical predictions pickle and load the backup in case of corruption
load_historic_predictions_from_disk
ec76214d023a6c53ffab0af8d43bc5b72b1d66af
freqtrade
data_drawer.py
16
25
https://github.com/freqtrade/freqtrade.git
4
112
0
79
222
Python
{ "docstring": "\n Locate and load a previously saved historic predictions.\n :return: bool - whether or not the drawer was located\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 18 }
def load_historic_predictions_from_disk(self): exists = self.historic_predictions_path.is_file() if exists: try: with open(self.historic_predictions_path, "rb") as fp: self.historic_predictions = cloudpickle.load(fp) logger.info( f"Found existing historic predictions at {self.full_path}, but beware " "that statistics may be inaccurate if the bot has been offline for " "an extended period of time." ) except EOFError: logger.warning( 'Historical prediction file was corrupted. Trying to load backup file.') with open(self.historic_predictions_bkp_path, "rb") as fp: self.historic_predictions = cloudpickle.load(fp) logger.warning('FreqAI successfully loaded the backup historical predictions file.') elif not self.follow_mode: logger.info("Could not find existing historic_predictions, starting from scratch") else: logger.warning( f"Follower could not find historic predictions at {self.full_path} " "sending null values back to strategy" ) return exists
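The same load-then-fall-back pattern in stdlib-only form (paths are placeholders; a truncated primary pickle raises EOFError):

import pickle
from pathlib import Path

def load_with_backup(path: Path, backup: Path):
    # Try the primary file first; fall back to the backup copy on corruption.
    try:
        with open(path, "rb") as fp:
            return pickle.load(fp)
    except EOFError:
        with open(backup, "rb") as fp:
            return pickle.load(fp)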
43,713
181,992
52
tests/test_css_parse.py
17
14
def test_background(self):
    css = """#some-widget {
        text: on red;
    }
    """
    stylesheet = Stylesheet()
    stylesheet.parse(css)
    styles = stylesheet.rules[0].styles
Namespacing parsing tests into classes
test_background
1103844708c7f3a3bd1fc33cae56eb59209ef6c0
textual
test_css_parse.py
10
9
https://github.com/Textualize/textual.git
1
48
0
15
79
Python
{ "docstring": "#some-widget {\n text: on red;\n }\n ", "language": "en", "n_whitespaces": 31, "n_words": 6, "vocab_size": 6 }
def test_background(self):
    css = """#some-widget {
        text: on red;
    }
    """
    stylesheet = Stylesheet()
    stylesheet.parse(css)
    styles = stylesheet.rules[0].styles

    assert styles.text_background == Color("red", type=ColorType.STANDARD, number=1)
103,210
304,403
489
homeassistant/components/dte_energy_bridge/sensor.py
146
24
def update(self) -> None: try: response = requests.get(self._url, timeout=5) except (requests.exceptions.RequestException, ValueError): _LOGGER.warning( "Could not update status for DTE Energy Bridge (%s)", self._attr_name ) return if response.status_code != HTTPStatus.OK: _LOGGER.warning( "Invalid status_code from DTE Energy Bridge: %s (%s)", response.status_code, self._attr_name, ) return response_split = response.text.split() if len(response_split) != 2: _LOGGER.warning( 'Invalid response from DTE Energy Bridge: "%s" (%s)', response.text, self._attr_name, ) return
Improve entity type hints [d] (#77031)
update
bf7239c25db06f1377a895244a906b43242c9963
core
sensor.py
11
29
https://github.com/home-assistant/core.git
6
141
0
99
234
Python
{ "docstring": "Get the energy usage data from the DTE energy bridge.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 8 }
def update(self) -> None: try: response = requests.get(self._url, timeout=5) except (requests.exceptions.RequestException, ValueError): _LOGGER.warning( "Could not update status for DTE Energy Bridge (%s)", self._attr_name ) return if response.status_code != HTTPStatus.OK: _LOGGER.warning( "Invalid status_code from DTE Energy Bridge: %s (%s)", response.status_code, self._attr_name, ) return response_split = response.text.split() if len(response_split) != 2: _LOGGER.warning( 'Invalid response from DTE Energy Bridge: "%s" (%s)', response.text, self._attr_name, ) return val = float(response_split[0]) # A workaround for a bug in the DTE energy bridge. # The returned value can randomly be in W or kW. Checking for a # a decimal seems to be a reliable way to determine the units. # Limiting to version 1 because version 2 apparently always returns # values in the format 000000.000 kW, but the scaling is Watts # NOT kWatts if self._version == 1 and "." in response_split[0]: self._attr_native_value = val else: self._attr_native_value = val / 1000
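The unit workaround at the end, restated as a standalone helper for clarity (a sketch, not part of the integration):

def normalize_reading(raw: str, version: int) -> float:
    # Version 1 bridges report kW when the value contains a decimal point,
    # otherwise watts; later versions always use the 000000.000 kW format.
    value = float(raw)
    if version == 1 and "." in raw:
        return value
    return value / 1000

assert normalize_reading("1.5", 1) == 1.5    # already kW
assert normalize_reading("1500", 1) == 1.5   # watts -> kW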
36,576
156,131
238
dask/order.py
78
20
def ndependencies(dependencies, dependents):
    num_needed = {}
    result = {}
    for k, v in dependencies.items():
        num_needed[k] = len(v)
        if not v:
            result[k] = 1

    num_dependencies = num_needed.copy()
    current = []
    current_pop = current.pop
    current_append = current.append

    for key in result:
        for parent in dependents[key]:
            num_needed[parent] -= 1
            if not num_needed[parent]:
                current_append(parent)
    while current:
        key = current_pop()
        result[key] = 1 + sum(result[child] for child in dependencies[key])
        for parent in dependents[key]:
            num_needed[parent] -= 1
            if not num_needed[parent]:
                current_append(parent)
    return num_dependencies, result
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
ndependencies
cccb9d8d8e33a891396b1275c2448c352ef40c27
dask
order.py
13
24
https://github.com/dask/dask.git
10
155
0
45
244
Python
{ "docstring": "Number of total data elements on which this key depends\n\n For each key we return the number of tasks that must be run for us to run\n this task.\n\n Examples\n --------\n >>> inc = lambda x: x + 1\n >>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}\n >>> dependencies, dependents = get_deps(dsk)\n >>> num_dependencies, total_dependencies = ndependencies(dependencies, dependents)\n >>> sorted(total_dependencies.items())\n [('a', 1), ('b', 2), ('c', 3)]\n\n Returns\n -------\n num_dependencies: Dict[key, int]\n total_dependencies: Dict[key, int]\n ", "language": "en", "n_whitespaces": 122, "n_words": 77, "vocab_size": 63 }
def ndependencies(dependencies, dependents): num_needed = {} result = {} for k, v in dependencies.items(): num_needed[k] = len(v) if not v: result[k] = 1 num_dependencies = num_needed.copy() current = [] current_pop = current.pop current_append = current.append for key in result: for parent in dependents[key]: num_needed[parent] -= 1 if not num_needed[parent]: current_append(parent) while current: key = current_pop() result[key] = 1 + sum(result[child] for child in dependencies[key]) for parent in dependents[key]: num_needed[parent] -= 1 if not num_needed[parent]: current_append(parent) return num_dependencies, result
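The doctest in self-contained form, with dependencies/dependents built by hand rather than via get_deps:

dependencies = {"a": set(), "b": {"a"}, "c": {"b"}}
dependents = {"a": {"b"}, "b": {"c"}, "c": set()}
num_dependencies, total_dependencies = ndependencies(dependencies, dependents)
assert num_dependencies == {"a": 0, "b": 1, "c": 1}
assert sorted(total_dependencies.items()) == [("a", 1), ("b", 2), ("c", 3)]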
39,982
167,375
181
pandas/io/pytables.py
59
13
def validate_attr(self, append) -> None:
    if append:
        existing_fields = getattr(self.attrs, self.kind_attr, None)
        if existing_fields is not None and existing_fields != list(self.values):
            raise ValueError("appended items do not match existing items in table!")

        existing_dtype = getattr(self.attrs, self.dtype_attr, None)
        if existing_dtype is not None and existing_dtype != self.dtype:
            raise ValueError(
                "appended items dtype do not match existing items dtype in table!"
            )
TYP: some return annotations in pytables.py (#47512)
validate_attr
7d2f9b8d59908fbf57c6453bc41891efbfe981a6
pandas
pytables.py
12
11
https://github.com/pandas-dev/pandas.git
6
78
0
34
124
Python
{ "docstring": "validate that we have the same order as the existing & same dtype", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 11 }
def validate_attr(self, append) -> None: if append: existing_fields = getattr(self.attrs, self.kind_attr, None) if existing_fields is not None and existing_fields != list(self.values): raise ValueError("appended items do not match existing items in table!") existing_dtype = getattr(self.attrs, self.dtype_attr, None) if existing_dtype is not None and existing_dtype != self.dtype: raise ValueError( "appended items dtype do not match existing items dtype in table!" )
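The user-visible effect of this guard, as a hypothetical HDFStore session (file name is illustrative; requires the optional tables dependency):

import pandas as pd

with pd.HDFStore("demo.h5") as store:
    store.append("t", pd.DataFrame({"a": [1, 2]}))
    # Appending a frame whose dtypes no longer match the stored table is
    # expected to raise ValueError via validations like the one above.
    store.append("t", pd.DataFrame({"a": ["x"]}))  # ValueError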
Final = _FinalForm('Final', doc="""A special typing construct to indicate that a name cannot be re-assigned or overridden in a subclass. For example: MAX_SIZE: Final = 9000 MAX_SIZE += 1  # Error reported by type checker""")
3,619
20,928
101
pipenv/patched/notpip/_vendor/typing_extensions.py
18
28
def __getitem__(self, parameters):
    item = typing._type_check(parameters, f'{self._name} accepts only single type')
    return typing._GenericAlias(self, (item,))

Final = _FinalForm('Final',
                   doc="""A special typing construct to indicate that a name
                   cannot be re-assigned or overridden in a subclass.
                   For example:

                       MAX_SIZE: Final = 9000
                       MAX_SIZE += 1  # Error reported by type checker""")
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
__getitem__
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
typing_extensions.py
11
4
https://github.com/pypa/pipenv.git
1
30
3
17
103
Python
{ "docstring": "A special typing construct to indicate that a name\n cannot be re-assigned or overridden in a subclass.\n For example:\n\n MAX_SIZE: Final = 9000\n MAX_SIZE += 1 # Error reported by type checker", "language": "en", "n_whitespaces": 128, "n_words": 32, "vocab_size": 31 }
def __getitem__(self, parameters):
    item = typing._type_check(parameters, f'{self._name} accepts only single type')
    return typing._GenericAlias(self, (item,))

Final = _FinalForm('Final',
                   doc="""A special typing construct to indicate that a name
                   cannot be re-assigned or overridden in a subclass.
                   For example:

                       MAX_SIZE: Final = 9000
                       MAX_SIZE += 1  # Error reported by type checker""")
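Runtime behavior matching the doc text: the reassignment runs fine at runtime and is only flagged by a static type checker such as mypy.

from typing_extensions import Final

MAX_SIZE: Final = 9000
MAX_SIZE += 1  # error under mypy/pyright; no runtime exception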
81,863
277,091
76
keras/utils/tf_utils.py
36
9
def type_spec_from_value(value):
    if is_extension_type(value):
        return value._type_spec  # pylint: disable=protected-access
    # Get a TensorSpec for array-like data without
    # converting the data to a Tensor
    if hasattr(value, "shape") and hasattr(value, "dtype"):
        return tf.TensorSpec(value.shape, value.dtype)
    else:
        return tf.type_spec_from_value(value)
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
type_spec_from_value
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
tf_utils.py
10
7
https://github.com/keras-team/keras.git
4
53
0
28
92
Python
{ "docstring": "Grab type_spec without converting array-likes to tensors.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
def type_spec_from_value(value): if is_extension_type(value): return value._type_spec # pylint: disable=protected-access # Get a TensorSpec for array-like data without # converting the data to a Tensor if hasattr(value, "shape") and hasattr(value, "dtype"): return tf.TensorSpec(value.shape, value.dtype) else: return tf.type_spec_from_value(value)
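A quick check of the array-like fast path (assuming TensorFlow is available):

import numpy as np
import tensorflow as tf

arr = np.zeros((2, 3), dtype=np.float32)  # exposes .shape and .dtype
spec = type_spec_from_value(arr)          # no Tensor is materialized
assert spec == tf.TensorSpec((2, 3), tf.float32)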
53,367
212,726
141
DemoPrograms/Demo_User_Settings_Class.py
49
19
def make_window():
    sg.theme(settings.get('-theme-', 'DarkBlue2'))  # set the theme

    layout = [[sg.Text('Settings Window')],
              [sg.Input(settings.get('-input-', ''), k='-IN-')],
              [sg.Listbox(sg.theme_list(), default_values=[settings['-theme-'],], size=(15, 10), k='-LISTBOX-')],
              [sg.CB('Option 1', settings.get('-option1-', True), k='-CB1-')],
              [sg.CB('Option 2', settings.get('-option2-', False), k='-CB2-')],
              [sg.T('Settings file = ' + settings.get_filename())],
              [sg.Button('Save'), sg.Button('Settings Dictionary'), sg.Button('Exit without saving', k='Exit')]]

    window = sg.Window('A Settings Window', layout)
    return window
Catching up on the many many demo programs that were not checked in....
make_window
cfe2c96a1fa6fc721c998179298a7d430ccbaefd
PySimpleGUI
Demo_User_Settings_Class.py
14
10
https://github.com/PySimpleGUI/PySimpleGUI.git
1
181
0
46
304
Python
{ "docstring": "\n Creates a new window. The default values for some elements are pulled directly from the\n \"User Settings\" without the use of temp variables.\n\n Some get_entry calls don't have a default value, such as theme, because there was an initial call\n that would have set the default value if the setting wasn't present. Could still put the default\n value if you wanted but it would be 2 places to change if you wanted a different default value.\n\n Use of a lookup table to map between element keys and user settings could be aded. This demo\n is intentionally done without one to show how to use the settings APIs in the most basic,\n straightforward way.\n\n If your application allows changing the theme, then a make_window function is good to have\n so that you can close and re-create a window easily.\n\n :return: (sg.Window) The window that was created\n ", "language": "en", "n_whitespaces": 185, "n_words": 145, "vocab_size": 103 }
def make_window():
    sg.theme(settings.get('-theme-', 'DarkBlue2'))  # set the theme

    layout = [[sg.Text('Settings Window')],
              [sg.Input(settings.get('-input-', ''), k='-IN-')],
              [sg.Listbox(sg.theme_list(), default_values=[settings['-theme-'],], size=(15, 10), k='-LISTBOX-')],
              [sg.CB('Option 1', settings.get('-option1-', True), k='-CB1-')],
              [sg.CB('Option 2', settings.get('-option2-', False), k='-CB2-')],
              [sg.T('Settings file = ' + settings.get_filename())],
              [sg.Button('Save'), sg.Button('Settings Dictionary'), sg.Button('Exit without saving', k='Exit')]]

    window = sg.Window('A Settings Window', layout)
    return window
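A typical companion event loop for this window (a sketch; key names follow the layout above):

window = make_window()
while True:
    event, values = window.read()
    if event in (sg.WIN_CLOSED, 'Exit'):
        break
    if event == 'Save':
        settings['-input-'] = values['-IN-']
        if values['-LISTBOX-']:
            settings['-theme-'] = values['-LISTBOX-'][0]
        settings['-option1-'] = values['-CB1-']
        settings['-option2-'] = values['-CB2-']
window.close()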
72,922
249,450
52
synapse/metrics/common_usage_metrics.py
13
7
async def _collect(self) -> CommonUsageMetrics: dau_count = await self._store.count_daily_users() return CommonUsageMetrics( daily_active_users=dau_count, )
Share some metrics between the Prometheus exporter and the phone home stats (#13671)
_collect
898fef2789c9b1a20ef53c7d588f536f51f0fe2f
synapse
common_usage_metrics.py
10
8
https://github.com/matrix-org/synapse.git
1
26
0
13
46
Python
{ "docstring": "Collect the common metrics and either create the CommonUsageMetrics object to\n use if it doesn't exist yet, or update it.\n ", "language": "en", "n_whitespaces": 34, "n_words": 20, "vocab_size": 19 }
async def _collect(self) -> CommonUsageMetrics: dau_count = await self._store.count_daily_users() return CommonUsageMetrics( daily_active_users=dau_count, )
42,702
178,456
260
nuitka/plugins/Plugins.py
65
12
def getPreprocessorSymbols(cls):
    if cls.preprocessor_symbols is None:
        cls.preprocessor_symbols = OrderedDict()

        for plugin in getActivePlugins():
            value = plugin.getPreprocessorSymbols()

            if value is not None:
                assert type(value) is dict, value

                # We order per plugin, but from the plugins, lets just take a dict
                # and achieve determinism by ordering the defines by name.
                for key, value in sorted(value.items()):
                    # False alarm, pylint: disable=I0021,unsupported-assignment-operation
                    cls.preprocessor_symbols[key] = value

    return cls.preprocessor_symbols
Minor cleanups * Typos and minor problems only
getPreprocessorSymbols
11b0190a5e2d77098b16ff01ae8597428e055f53
Nuitka
Plugins.py
16
10
https://github.com/Nuitka/Nuitka.git
5
75
0
47
124
Python
{ "docstring": "Let plugins provide C defines to be used in compilation.\n\n Notes:\n The plugins can each contribute, but are hopefully using\n a namespace for their defines.\n\n Returns:\n OrderedDict(), where None value indicates no define value,\n i.e. \"-Dkey=value\" vs. \"-Dkey\"\n ", "language": "en", "n_whitespaces": 103, "n_words": 38, "vocab_size": 37 }
def getPreprocessorSymbols(cls): if cls.preprocessor_symbols is None: cls.preprocessor_symbols = OrderedDict() for plugin in getActivePlugins(): value = plugin.getPreprocessorSymbols() if value is not None: assert type(value) is dict, value # We order per plugin, but from the plugins, lets just take a dict # and achieve determinism by ordering the defines by name. for key, value in sorted(value.items()): # False alarm, pylint: disable=I0021,unsupported-assignment-operation cls.preprocessor_symbols[key] = value return cls.preprocessor_symbols
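How such a mapping typically becomes compiler flags, per the docstring's "-Dkey=value" vs. "-Dkey" note (an illustrative helper, not part of the plugin API):

def define_flags(symbols):
    # None means a bare define ("-Dkey"); anything else gets "=value".
    return [
        "-D%s" % key if value is None else "-D%s=%s" % (key, value)
        for key, value in symbols.items()
    ]

assert define_flags({"A": None, "B": "1"}) == ["-DA", "-DB=1"]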
23,267
108,585
117
lib/matplotlib/text.py
38
12
def _check_xy(self, renderer=None):
    if renderer is None:
        renderer = self.figure._get_renderer()
    b = self.get_annotation_clip()
    if b or (b is None and self.xycoords == "data"):
        # check if self.xy is inside the axes.
        xy_pixel = self._get_position_xy(renderer)
        return self.axes.contains_point(xy_pixel)
    return True
MNT: make renderer always optional
_check_xy
24b16804731d3a724e4ec0984da140b1a6b05c66
matplotlib
text.py
11
8
https://github.com/matplotlib/matplotlib.git
5
65
0
29
109
Python
{ "docstring": "Check whether the annotation at *xy_pixel* should be drawn.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def _check_xy(self, renderer=None): if renderer is None: renderer = self.figure._get_renderer() b = self.get_annotation_clip() if b or (b is None and self.xycoords == "data"): # check if self.xy is inside the axes. xy_pixel = self._get_position_xy(renderer) return self.axes.contains_point(xy_pixel) return True
78,660
266,931
41
lib/ansible/plugins/connection/__init__.py
20
7
def _split_ssh_args(argstring):
    # In Python3, shlex.split doesn't work on a byte string.
    return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()]
Remove more Python 2.x compatibility code from controller. (#77320)
_split_ssh_args
4baf18c573c17cf9cd5716b28dbf38a32b57aaff
ansible
__init__.py
10
2
https://github.com/ansible/ansible.git
3
32
0
20
55
Python
{ "docstring": "\n Takes a string like '-o Foo=1 -o Bar=\"foo bar\"' and returns a\n list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to\n the argument list. The list will not contain any empty elements.\n ", "language": "en", "n_whitespaces": 63, "n_words": 34, "vocab_size": 32 }
def _split_ssh_args(argstring): # In Python3, shlex.split doesn't work on a byte string. return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()]
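A behavior check matching the docstring, assuming the enclosing class is ConnectionBase (the base class defined in this module) and the method is effectively static:

assert ConnectionBase._split_ssh_args('-o Foo=1 -o Bar="foo bar"') == ['-o', 'Foo=1', '-o', 'Bar=foo bar']
assert ConnectionBase._split_ssh_args('   ') == []  # no empty elements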