Dataset columns (name, dtype, observed range):
    complexity        int64            1 – 139
    fun_name          stringlengths    1 – 80
    code              stringlengths    101 – 62.2k
    commit_id         stringlengths    40 – 40
    ast_errors        stringlengths    0 – 3.11k
    ast_levels        int64            6 – 36
    file_name         stringlengths    5 – 79
    n_ast_nodes       int64            17 – 19.2k
    commit_message    stringlengths    3 – 15.3k
    d_id              int64            12 – 121k
    n_ast_errors      int64            0 – 9
    n_whitespaces     int64            4 – 10.8k
    token_counts      int64            5 – 3.06k
    vocab_size        int64            4 – 1.11k
    id                int64            20 – 338k
    n_words           int64            4 – 4.82k
    repo              stringlengths    3 – 22
    n_identifiers     int64            2 – 176
    path              stringlengths    7 – 134
    language          stringclasses    1 value
    nloc              int64            1 – 413
    documentation     dict
    url               stringlengths    31 – 59
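Each record below pairs a single function's source (code) with its repository metadata (repo, path, commit_id) and its extracted docstring (documentation). As a minimal, hedged sketch of how a Hub-hosted copy of such a dataset could be inspected with the Hugging Face datasets library (the repository id "org/python-code-dataset" is a hypothetical placeholder, not the dataset's actual name):

    from datasets import load_dataset

    # Hypothetical repository id -- replace with the dataset's real Hub name.
    ds = load_dataset("org/python-code-dataset", split="train")

    print(ds.features)  # column names and dtypes, matching the summary above
    row = ds[0]
    print(row["fun_name"], row["repo"], row["path"])
    print(row["code"])  # the source snippet stored for that row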
1
image
def image(self) -> ImageTk.PhotoImage:
    assert self._preview_image_tk is not None
    return self._preview_image_tk
2e8ef5e3c8f2df0f1cca9b342baa8aaa6f620650
7
image.py
35
GUI - Preview updates - Training preview. Embed preview pop-out window - Bugfix - convert/extract previews
21,352
0
32
21
10
101,978
11
faceswap
5
lib/gui/utils/image.py
Python
4
{ "docstring": ":class:`PIL.ImageTk.PhotoImage` The preview image for displaying in a tkinter canvas ", "language": "en", "n_whitespaces": 10, "n_words": 10, "vocab_size": 10 }
https://github.com/deepfakes/faceswap.git
11
_join_by_index
def _join_by_index(self, other_modin_frames, how, sort, ignore_index):
    if how == "outer":
        raise NotImplementedError("outer join is not supported in HDK engine")

    lhs = self._maybe_materialize_rowid()
    reset_index_names = False
    for rhs in other_modin_frames:
        rhs = rhs._maybe_materialize_rowid()
        if len(lhs._index_cols) != len(rhs._index_cols):
            raise NotImplementedError(
                "join by indexes with different sizes is not supported"
            )

        reset_index_names = reset_index_names or lhs._index_cols != rhs._index_cols

        condition = lhs._build_equi_join_condition(
            rhs, lhs._index_cols, rhs._index_cols
        )

        exprs = lhs._index_exprs()
        new_columns = lhs.columns.to_list()
        for col in lhs.columns:
            exprs[col] = lhs.ref(col)
        for col in rhs.columns:
            # Handle duplicating column names here. When user specifies
            # suffixes to make a join, actual renaming is done in front-end.
            new_col_name = col
            rename_idx = 0
            while new_col_name in exprs:
                new_col_name = f"{col}{rename_idx}"
                rename_idx += 1
            exprs[new_col_name] = rhs.ref(col)
            new_columns.append(new_col_name)

        op = JoinNode(
            lhs,
            rhs,
            how=how,
            exprs=exprs,
            condition=condition,
        )

        new_columns = Index.__new__(
            Index, data=new_columns, dtype=self.columns.dtype
        )
        lhs = lhs.__constructor__(
            dtypes=lhs._dtypes_for_exprs(exprs),
            columns=new_columns,
            index_cols=lhs._index_cols,
            op=op,
            force_execution_mode=self._force_execution_mode,
        )

    if sort:
        lhs = lhs.sort_rows(
            lhs._index_cols,
            ascending=True,
            ignore_index=False,
            na_position="last",
        )

    if reset_index_names:
        lhs = lhs._reset_index_names()

    if ignore_index:
        new_columns = Index.__new__(RangeIndex, data=range(len(lhs.columns)))
        lhs = lhs._set_columns(new_columns)

    return lhs
e5b1888cd932909e49194d58035da34b210b91c4
16
dataframe.py
498
FEAT-#4946: Replace OmniSci with HDK (#4947) Co-authored-by: Iaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: Andrey Pavlenko <andrey.a.pavlenko@gmail.com>
36,066
0
912
315
113
154,556
171
modin
44
modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py
Python
57
{ "docstring": "\n Perform equi-join operation for multiple frames by index columns.\n\n Parameters\n ----------\n other_modin_frames : list of HdkOnNativeDataframe\n Frames to join with.\n how : str\n A type of join.\n sort : bool\n Sort the result by join keys.\n ignore_index : bool\n If True then reset column index for the resulting frame.\n\n Returns\n -------\n HdkOnNativeDataframe\n The new frame.\n ", "language": "en", "n_whitespaces": 188, "n_words": 55, "vocab_size": 43 }
https://github.com/modin-project/modin.git
9
_get_suitable_downloader
def _get_suitable_downloader(info_dict, params={}):
    # if (info_dict.get('start_time') or info_dict.get('end_time')) and not info_dict.get('requested_formats') and FFmpegFD.can_download(info_dict):
    #     return FFmpegFD

    external_downloader = params.get('external_downloader')
    if external_downloader is not None:
        ed = get_external_downloader(external_downloader)
        if ed.can_download(info_dict):
            return ed

    protocol = info_dict['protocol']
    if protocol.startswith('m3u8') and info_dict.get('is_live'):
        return FFmpegFD

    if protocol == 'm3u8' and params.get('hls_prefer_native') is True:
        return HlsFD
    if protocol == 'm3u8_native' and params.get('hls_prefer_native') is False:
        return FFmpegFD

    return PROTOCOL_MAP.get(protocol, HttpFD)


__all__ = [
    'get_suitable_downloader',
    'FileDownloader',
]
92d73ef3936ed6de9770f613fddf2260731becc9
10
__init__.py
200
[niconico] Implement heartbeat for download
22,343
0
153
105
40
106,288
69
youtube-dl
15
youtube_dl/downloader/__init__.py
Python
14
{ "docstring": "Get the downloader class that can handle the info dict.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/ytdl-org/youtube-dl.git
1
test_having_condition_with_preventing_aggregate_metrics_only
def test_having_condition_with_preventing_aggregate_metrics_only(self):
    response = self.do_request(
        {
            "field": ["transaction", "project", "p50(transaction.duration)"],
            "query": "event.type:transaction p50(transaction.duration):<50",
            "dataset": "metrics",
            "preventMetricAggregates": "1",
            "per_page": 50,
            "project": self.project.id,
        }
    )
    assert response.status_code == 400, response.content
d3b8c9dd7bef6bccb5e70d2ccf3cda8463444a34
12
test_organization_events_mep.py
109
chore(discover): Cleanup events tests (#36797) - Delete the deprecated eventsv2 tests - Move MEP tests to its own file
18,970
0
168
59
28
93,148
28
sentry
8
tests/snuba/api/endpoints/test_organization_events_mep.py
Python
12
{ "docstring": "same as the previous test, but with the dataset on explicit metrics\n which should throw a 400 error instead", "language": "en", "n_whitespaces": 25, "n_words": 19, "vocab_size": 18 }
https://github.com/getsentry/sentry.git
4
test_unknown_sequence_followed_by_known_sequence
def test_unknown_sequence_followed_by_known_sequence(parser, chunk_size):
    unknown_sequence = "\x1b[?"
    known_sequence = "\x1b[8~"  # key = 'end'

    sequence = unknown_sequence + known_sequence

    events = []
    parser.more_data = lambda: True
    for chunk in chunks(sequence, chunk_size):
        events.append(parser.feed(chunk))

    events = list(itertools.chain.from_iterable(list(event) for event in events))

    assert [event.key for event in events] == [
        "circumflex_accent",
        "left_square_bracket",
        "question_mark",
        "end",
    ]
3f0955cbe5405bdb3d1dda756ee3a1e000695dff
12
test_xterm_parser.py
156
fix tests
44,934
0
117
92
36
185,184
51
textual
18
tests/test_xterm_parser.py
Python
15
{ "docstring": "When we feed the parser an unknown sequence followed by a known\n sequence. The characters in the unknown sequence are delivered as keys,\n and the known escape sequence that follows is delivered as expected.\n ", "language": "en", "n_whitespaces": 43, "n_words": 34, "vocab_size": 26 }
https://github.com/Textualize/textual.git
1
test_block_tag
def test_block_tag(self):
    user = self.get_staff()
    page = create_page('Test', 'col_two.html', 'en', published=True)
    ex1 = Example1(
        date_field=datetime.date(2012, 1, 1),
        **FOUR_CHARS
    )
    ex1.save()

    # This template does not render anything as content is saved in a
    # variable and never inserted in the page
    template_text =
    request = self.get_page_request(page, user, edit=True)
    response = detail_view(request, ex1.pk, template_string=template_text)
    self.assertNotContains(
        response,
        '<template class="cms-plugin cms-plugin-start cms-plugin-{0}-{1}-{2} cms-render-model-icon"></template>'
        '<img src="/static/cms/img/toolbar/render_model_icon.png">'
        '<template class="cms-plugin cms-plugin-end cms-plugin-{0}-{1}-{2} cms-render-model-icon">'
        '</template>'.format(
            'placeholderapp', 'example1', ex1.pk
        )
    )

    # This template does not render anything as content is saved in a
    # variable and inserted in the page afterwards
    template_text =
    request = self.get_page_request(page, user, edit=True)
    response = detail_view(request, ex1.pk, template_string=template_text)
    # Assertions on the content of the block tag
    self.assertContains(
        response,
        '<template class="cms-plugin cms-plugin-start cms-plugin-{}-{}-{} '
        'cms-render-model cms-render-model-block">'.format(
            'placeholderapp', 'example1', ex1.pk
        )
    )
    self.assertContains(response, f'<h1>{ex1.char_1} - {ex1.char_2}</h1>')
    self.assertContains(response, '<span class="date">%s</span>' % (ex1.date_field.strftime("%Y")))
    self.assertContains(
        response,
        '<a href="%s">successful if</a>\n \n<template' % (reverse('detail', args=(ex1.pk,)))
    )

    # This template is rendered directly
    template_text =
    request = self.get_page_request(page, user, edit=True)
    response = detail_view(request, ex1.pk, template_string=template_text)
    # Assertions on the content of the block tag
    self.assertContains(
        response,
        '<template class="cms-plugin cms-plugin-start cms-plugin-{}-{}-{} cms-render-model '
        'cms-render-model-block">'.format(
            'placeholderapp', 'example1', ex1.pk
        )
    )
    self.assertContains(response, f'<h1>{ex1.char_1} - {ex1.char_2}</h1>')
    self.assertContains(response, '<span class="date">%s</span>' % (ex1.date_field.strftime("%Y")))
    self.assertContains(
        response,
        '<a href="%s">successful if</a>\n \n<template' % (reverse('detail', args=(ex1.pk,)))
    )

    # Changelist check
    template_text =
    request = self.get_page_request(page, user, edit=True)
    response = detail_view(request, ex1.pk, template_string=template_text)
    # Assertions on the content of the block tag
    self.assertContains(
        response,
        '<template class="cms-plugin cms-plugin-start cms-plugin-{}-{}-changelist-{} cms-render-model '
        'cms-render-model-block"></template>'.format(
            'placeholderapp', 'example1', ex1.pk
        )
    )
    self.assertContains(
        response,
        "edit_plugin: '{}?language={}&amp;edit_fields=changelist'".format(
            admin_reverse('placeholderapp_example1_changelist'), 'en'
        )
    )
b8750ebc0ebaa52ec51945f1d4824a80d806f479
14
test_toolbar.py
672
ci: sync isort line length (#7353)
17,440
0
921
379
97
82,563
254
django-cms
31
cms/tests/test_toolbar.py
Python
114
{ "docstring": "{% extends \"base.html\" %}\n{% load cms_tags %}\n\n{% block content %}\n{% render_model_block instance as rendered_model %}\n {{ instance }}\n <h1>{{ instance.char_1 }} - {{ instance.char_2 }}</h1>\n {{ instance.date_field|date:\"Y\" }}\n {% if instance.char_1 %}\n <a href=\"{% url 'detail' instance.pk %}\">successful if</a>\n {% endif %}\n{% endrender_model_block %}\n{% endblock content %}\n{% extends \"base.html\" %}\n{% load cms_tags %}\n\n{% block content %}\n{% render_model_block instance as rendered_model %}\n {{ instance }}\n <h1>{{ instance.char_1 }} - {{ instance.char_2 }}</h1>\n <span class=\"date\">{{ instance.date_field|date:\"Y\" }}</span>\n {% if instance.char_1 %}\n <a href=\"{% url 'detail' instance.pk %}\">successful if</a>\n {% endif %}\n{% endrender_model_block %}\n{{ rendered_model }}\n{% endblock content %}\n{% extends \"base.html\" %}\n{% load cms_tags %}\n\n{% block content %}\n{% render_model_block instance %}\n {{ instance }}\n <h1>{{ instance.char_1 }} - {{ instance.char_2 }}</h1>\n <span class=\"date\">{{ instance.date_field|date:\"Y\" }}</span>\n {% if instance.char_1 %}\n <a href=\"{% url 'detail' instance.pk %}\">successful if</a>\n {% endif %}\n{% endrender_model_block %}\n{% endblock content %}\n{% extends \"base.html\" %}\n{% load cms_tags %}\n\n{% block content %}\n{% render_model_block instance 'changelist' %}\n {{ instance }}\n{% endrender_model_block %}\n{% endblock content %}\n", "language": "en", "n_whitespaces": 221, "n_words": 186, "vocab_size": 35 }
https://github.com/django-cms/django-cms.git
1
test_connect_more_newlines
def test_connect_more_newlines(tctx):
    server = Placeholder(Server)
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))
    nl = Placeholder(layer.NextLayer)

    assert (
        playbook
        >> DataReceived(tctx.client, b"CONNECT example.com:80 HTTP/1.1\r\n\r\n\r\n")
        << http.HttpConnectHook(Placeholder())
        >> reply()
        << OpenConnection(server)
        >> reply(None)
        << SendData(tctx.client, b"HTTP/1.1 200 Connection established\r\n\r\n")
        >> DataReceived(tctx.client, b"\x16\x03\x03\x00\xb3\x01\x00\x00\xaf\x03\x03")
        << layer.NextLayerHook(nl)
    )
    assert nl().data_client() == b"\x16\x03\x03\x00\xb3\x01\x00\x00\xaf\x03\x03"
b3587b52b25077f68116b9852b041d33e7fc6601
17
test_http.py
212
make it black!
73,851
0
128
118
33
251,848
44
mitmproxy
22
test/mitmproxy/proxy/layers/http/test_http.py
Python
16
{ "docstring": "Ignore superfluous \\r\\n in CONNECT request, https://github.com/mitmproxy/mitmproxy/issues/4870", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/mitmproxy/mitmproxy.git
1
set_as_test_mirror
def set_as_test_mirror(self, primary_settings_dict):
    self.connection.settings_dict["NAME"] = primary_settings_dict["NAME"]
9c19aff7c7561e3a82978a272ecdaad40dda5c00
9
creation.py
38
Refs #33476 -- Reformatted code with Black.
50,921
0
20
21
6
204,842
6
django
5
django/db/backends/base/creation.py
Python
2
{ "docstring": "\n Set this database up to be used in testing as a mirror of a primary\n database whose settings are given.\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 18 }
https://github.com/django/django.git
1
themed_values
def themed_values(self) -> dict[str, Any] | None:
    return getattr(self, '__themed_values__', None)
528d85e642340ef30ec91f30b65c7c43370f648d
8
has_props.py
39
Normalize built-in types and remove `Unknown` (#12252) * Use lower case names for built-in types Also incidentally apply TypeAlias marker. * Drop `Unknown` in favour of consistent usage of `Any` * Enable lazy annotations in conftest.py
53,247
0
25
24
11
212,474
11
bokeh
6
bokeh/core/has_props.py
Python
11
{ "docstring": " Get any theme-provided overrides.\n\n Results are returned as a dict from property name to value, or\n ``None`` if no theme overrides any values for this instance.\n\n Returns:\n dict or None\n\n ", "language": "en", "n_whitespaces": 70, "n_words": 30, "vocab_size": 27 }
https://github.com/bokeh/bokeh.git
4
is_cgi
def is_cgi(self):
    collapsed_path = _url_collapse_path(self.path)
    dir_sep = collapsed_path.find('/', 1)
    while dir_sep > 0 and not collapsed_path[:dir_sep] in self.cgi_directories:
        dir_sep = collapsed_path.find('/', dir_sep+1)
    if dir_sep > 0:
        head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
        self.cgi_info = head, tail
        return True
    return False


cgi_directories = ['/cgi-bin', '/htbin']
8198943edd73a363c266633e1aa5b2a9e9c9f526
12
server.py
153
add python 3.10.4 for windows
54,974
0
132
85
30
217,867
43
XX-Net
11
python3.10.4/Lib/http/server.py
Python
10
{ "docstring": "Test whether self.path corresponds to a CGI script.\n\n Returns True and updates the cgi_info attribute to the tuple\n (dir, rest) if self.path requires running a CGI script.\n Returns False otherwise.\n\n If any exception is raised, the caller should assume that\n self.path was rejected as invalid and act accordingly.\n\n The default implementation tests whether the normalized url\n path begins with one of the strings in self.cgi_directories\n (and the next character is a '/' or the end of the string).\n\n ", "language": "en", "n_whitespaces": 141, "n_words": 78, "vocab_size": 59 }
https://github.com/XX-net/XX-Net.git
7
to_euler
def to_euler(self, seq):
    extrinsic = _check_sequence(seq)
    i, j, k = seq.lower()
    i = _elementary_axis_index(i)
    j = _elementary_axis_index(j)
    k = _elementary_axis_index(k)

    if not extrinsic:
        i, k = k, i

    # check if sequence is symmetric
    symmetric = i == k
    if symmetric:
        k = 6 - i - j

    # parity of the permutation
    sign = (i - j) * (j - k) * (k - i) // 2

    # permutate elements
    elements = [self.a, self.b, self.c, self.d]
    a = elements[0]
    b = elements[i]
    c = elements[j]
    d = elements[k] * sign

    if not symmetric:
        a, b, c, d = a - c, b + d, c + a, d - b

    # calculate angles
    half_sum = atan2(b, a)
    half_diff = atan2(d, c)

    angle_2 = 2*atan2(sqrt(c*c + d*d), sqrt(a*a + b*b))
    # alternatively, we can use this to avoid the square root:
    # angle_2 = acos(2*(a*a + b*b)/(a*a + b*b + c*c + d*d) - 1)

    angle_1 = half_sum + half_diff
    angle_3 = half_sum - half_diff

    if extrinsic:
        angle_1, angle_3 = angle_3, angle_1

    # for Tait-Bryan angles
    if not symmetric:
        angle_2 -= pi / 2
        if extrinsic:
            angle_3 *= sign
        else:
            angle_1 *= sign

    return Matrix([angle_1, angle_2, angle_3])
69baa8d90fe079b799a80c8c06735c3ebd4bfe33
13
quaternion.py
404
added reference
49,727
0
521
258
104
200,603
197
sympy
26
sympy/algebras/quaternion.py
Python
33
{ "docstring": "Returns Euler angles representing same in the sequence given by\n `seq`. This implements the method described in [1]_.\n\n Parameters\n ==========\n\n seq : string of length 3\n Represents the sequence of rotations.\n For intrinsic rotations, seq but be all lowercase and its elements\n must be from the set `['x', 'y', 'z']`\n For extrinsic rotations, seq but be all uppercase and its elements\n must be from the set `['X', 'Y', 'Z']`\n\n Returns\n =======\n\n Matrix\n The Euler angles calculated from the quaternion\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy.abc import a, b, c, d\n >>> euler = Quaternion(a, b, c, d).to_euler('zyz')\n >>> euler\n Matrix([[-atan2(-b, c) + atan2(d, a)],\n [2*atan2(sqrt(b**2 + c**2), sqrt(a**2 + d**2))],\n [atan2(-b, c) + atan2(d, a)]])\n\n References\n ==========\n\n .. [1] https://doi.org/10.1371/journal.pone.0276302\n\n ", "language": "en", "n_whitespaces": 346, "n_words": 124, "vocab_size": 82 }
https://github.com/sympy/sympy.git
2
quantiles
def quantiles(self) -> np.ndarray:
    if not self.count:
        return np.ndarray([], dtype=np.float32)
    else:
        return np.nanpercentile(
            self.items[: self.count], [0, 10, 50, 90, 100]
        ).tolist()
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
15
window_stat.py
97
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
33,098
0
90
63
20
144,040
21
ray
10
rllib/utils/metrics/window_stat.py
Python
8
{ "docstring": "Returns ndarray with 0, 10, 50, 90, and 100 percentiles.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/ray-project/ray.git
1
test_copy_with_credential
def test_copy_with_credential(self):
    expression = "col1, col2"
    op = DatabricksCopyIntoOperator(
        file_location=COPY_FILE_LOCATION,
        file_format='CSV',
        table_name='test',
        task_id=TASK_ID,
        expression_list=expression,
        credential={'AZURE_SAS_TOKEN': 'abc'},
    )
    assert (
        op._create_sql_query()
        == f.strip()
    )
401419432082d222b823e4f2a66f21e5cc3ab28d
12
test_databricks_sql.py
102
Add new options to DatabricksCopyIntoOperator (#22076) This includes: * `encryption` - to specify encryption options for a given location * `credential` - to specify authentication options for a given location * `validate` - to control validation of schema & data
8,743
0
145
56
21
45,916
23
airflow
15
tests/providers/databricks/operators/test_databricks_sql.py
Python
17
{ "docstring": "COPY INTO test\nFROM (SELECT {expression} FROM '{COPY_FILE_LOCATION}' WITH (CREDENTIAL (AZURE_SAS_TOKEN = 'abc') ))\nFILEFORMAT = CSV\n", "language": "en", "n_whitespaces": 14, "n_words": 17, "vocab_size": 15 }
https://github.com/apache/airflow.git
1
test_press_button
async def test_press_button(hass):
    helper = await setup_test_component(hass, create_switch_with_setup_button)

    # Helper will be for the primary entity, which is the outlet. Make a helper for the button.
    button = Helper(
        hass,
        "button.testdevice_setup",
        helper.pairing,
        helper.accessory,
        helper.config_entry,
    )

    await hass.services.async_call(
        "button",
        "press",
        {"entity_id": "button.testdevice_setup"},
        blocking=True,
    )

    button.async_assert_service_values(
        ServicesTypes.OUTLET,
        {
            CharacteristicsTypes.Vendor.HAA_SETUP: "#HAA@trcmd",
        },
    )
58b8c30221a6f6e5acbbe98b7e3298b03fb741f5
11
test_button.py
129
Improve homekit_controller tests (#65266)
110,119
0
172
78
42
311,454
50
core
19
tests/components/homekit_controller/test_button.py
Python
21
{ "docstring": "Test a switch service that has a button characteristic is correctly handled.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/home-assistant/core.git
1
describe_an_sg_2
def describe_an_sg_2(ec2_client_stub, security_group):
    ec2_client_stub.add_response(
        "describe_security_groups",
        expected_params={"GroupIds": [security_group["GroupId"]]},
        service_response={"SecurityGroups": [security_group]},
    )
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
13
stubs.py
66
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,476
0
40
38
10
131,108
10
ray
6
python/ray/tests/aws/utils/stubs.py
Python
6
{ "docstring": "Same as last function, different input param format.\n\n A call with this input parameter format is made when sg.ip_permissions is\n accessed in aws/config.py.\n ", "language": "en", "n_whitespaces": 32, "n_words": 23, "vocab_size": 21 }
https://github.com/ray-project/ray.git
1
wait_for_style_to_equal
def wait_for_style_to_equal(self, selector, style, val, timeout=None):
    return self._wait_for(
        method=style_to_equal,
        args=(selector, style, val),
        timeout=timeout,
        msg=f"style val => {style} {val} not found within {timeout or self._wait_timeout}s",
    )
c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c
12
browser.py
81
f-strings everywhere! fffff
7,360
0
90
45
24
40,221
25
dash
12
dash/testing/browser.py
Python
7
{ "docstring": "Explicit wait until the element's style has expected `value` timeout\n if not set, equals to the fixture's `wait_timeout` shortcut to\n `WebDriverWait` with customized `style_to_equal` condition.", "language": "en", "n_whitespaces": 38, "n_words": 25, "vocab_size": 23 }
https://github.com/plotly/dash.git
17
get_rendered_html_form
def get_rendered_html_form(self, data, view, method, request):
    # See issue #2089 for refactoring this.
    serializer = getattr(data, 'serializer', None)
    if serializer and not getattr(serializer, 'many', False):
        instance = getattr(serializer, 'instance', None)
        if isinstance(instance, Page):
            instance = None
    else:
        instance = None

    # If this is valid serializer data, and the form is for the same
    # HTTP method as was used in the request then use the existing
    # serializer instance, rather than dynamically creating a new one.
    if request.method == method and serializer is not None:
        try:
            kwargs = {'data': request.data}
        except ParseError:
            kwargs = {}
        existing_serializer = serializer
    else:
        kwargs = {}
        existing_serializer = None

    with override_method(view, request, method) as request:
        if not self.show_form_for_method(view, method, request, instance):
            return

        if method in ('DELETE', 'OPTIONS'):
            return True  # Don't actually need to return a form

        has_serializer = getattr(view, 'get_serializer', None)
        has_serializer_class = getattr(view, 'serializer_class', None)

        if (
            (not has_serializer and not has_serializer_class) or
            not any(is_form_media_type(parser.media_type) for parser in view.parser_classes)
        ):
            return

        if existing_serializer is not None:
            with contextlib.suppress(TypeError):
                return self.render_form_for_serializer(existing_serializer)

        if has_serializer:
            if method in ('PUT', 'PATCH'):
                serializer = view.get_serializer(instance=instance, **kwargs)
            else:
                serializer = view.get_serializer(**kwargs)
        else:
            # at this point we must have a serializer_class
            if method in ('PUT', 'PATCH'):
                serializer = self._get_serializer(view.serializer_class, view, request, instance=instance, **kwargs)
            else:
                serializer = self._get_serializer(view.serializer_class, view, request, **kwargs)

        return self.render_form_for_serializer(serializer)
c10f2266222c434485889b08cc1463acdb8fa169
17
renderers.py
503
Refactor: Replace try/except with contextlib.suppress() (#8676)
9,563
0
902
308
111
48,672
215
django-rest-framework
30
rest_framework/renderers.py
Python
45
{ "docstring": "\n Return a string representing a rendered HTML form, possibly bound to\n either the input or output data.\n\n In the absence of the View having an associated form then return None.\n ", "language": "en", "n_whitespaces": 59, "n_words": 30, "vocab_size": 27 }
https://github.com/encode/django-rest-framework.git
5
get_expected_metric_variable_names
def get_expected_metric_variable_names(var_names, name_suffix=""):
    if tf.__internal__.tf2.enabled() or tf.executing_eagerly():
        # In V1 eager mode and V2 variable names are not made unique.
        return [n + ":0" for n in var_names]
    # In V1 graph mode variable names are made unique using a suffix.
    return [n + name_suffix + ":0" for n in var_names]
84afc5193d38057e2e2badf9c889ea87d80d8fbf
10
test_utils.py
85
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,647
0
77
49
34
276,385
51
keras
9
keras/testing_infra/test_utils.py
Python
4
{ "docstring": "Returns expected metric variable names given names and prefix/suffix.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
https://github.com/keras-team/keras.git
1
test_incorrect_lookup_parameters
def test_incorrect_lookup_parameters(self):
    changelist_url = reverse("admin:admin_views_thing_changelist")
    response = self.client.get(changelist_url, {"notarealfield": "5"})
    self.assertRedirects(response, "%s?e=1" % changelist_url)

    # Spanning relationships through a nonexistent related object (Refs #16716)
    response = self.client.get(changelist_url, {"notarealfield__whatever": "5"})
    self.assertRedirects(response, "%s?e=1" % changelist_url)

    response = self.client.get(
        changelist_url, {"color__id__exact": "StringNotInteger!"}
    )
    self.assertRedirects(response, "%s?e=1" % changelist_url)

    # Regression test for #18530
    response = self.client.get(changelist_url, {"pub_date__gte": "foo"})
    self.assertRedirects(response, "%s?e=1" % changelist_url)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
tests.py
207
Refs #33476 -- Reformatted code with Black.
52,112
0
160
116
35
207,811
58
django
8
tests/admin_views/tests.py
Python
12
{ "docstring": "Ensure incorrect lookup parameters are handled gracefully.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/django/django.git
2
load_items
def load_items(self) -> Dict[str, List[int]]:
    faces: Dict[str, List[int]] = {}
    for face in cast(List[Tuple[str, "PNGHeaderDict"]], self.file_list_sorted):
        src = face[1]["source"]
        faces.setdefault(src["source_filename"], []).append(src["face_index"])
    logger.trace(faces)  # type: ignore
    return faces
c79175cbde5600bebd65785f3821fc74b3a80cbe
13
media.py
138
Alignments Tool updates - Copy info back to alignments file from faces
21,163
0
85
87
25
101,759
27
faceswap
16
tools/alignments/media.py
Python
14
{ "docstring": " Load the face names into dictionary.\n\n Returns\n -------\n dict\n The source filename as key with list of face indices for the frame as value\n ", "language": "en", "n_whitespaces": 64, "n_words": 24, "vocab_size": 21 }
https://github.com/deepfakes/faceswap.git
4
row_join
def row_join(self, other):
    # A null matrix can always be stacked (see #10770)
    if self.cols == 0 and self.rows != other.rows:
        return self._new(other.rows, 0, []).row_join(other)

    if self.rows != other.rows:
        raise ShapeError(
            "The matrices have incompatible number of rows ({} and {})"
            .format(self.rows, other.rows))
    return self._eval_row_join(other)
9b2351534f8f02bcd5b9691d5e7a06150685beca
12
common.py
118
Make ShapeError more readable
49,553
0
133
74
39
200,153
45
sympy
9
sympy/matrices/common.py
Python
8
{ "docstring": "Concatenates two matrices along self's last and rhs's first column\n\n Examples\n ========\n\n >>> from sympy import zeros, ones\n >>> M = zeros(3)\n >>> V = ones(3, 1)\n >>> M.row_join(V)\n Matrix([\n [0, 0, 0, 1],\n [0, 0, 0, 1],\n [0, 0, 0, 1]])\n\n See Also\n ========\n\n row\n col_join\n ", "language": "en", "n_whitespaces": 152, "n_words": 47, "vocab_size": 34 }
https://github.com/sympy/sympy.git
1
test_permission_with_proxy_content_type_created
def test_permission_with_proxy_content_type_created(self):
    opts = UserProxy._meta
    codename = get_permission_codename("add", opts)
    self.assertTrue(
        Permission.objects.filter(
            content_type__model=opts.model_name,
            content_type__app_label=opts.app_label,
            codename=codename,
        ).exists()
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
test_management.py
83
Refs #33476 -- Reformatted code with Black.
49,919
0
118
51
15
201,335
16
django
16
tests/auth_tests/test_management.py
Python
10
{ "docstring": "\n A proxy model's permissions use its own content type rather than the\n content type of the concrete model.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 15 }
https://github.com/django/django.git
2
get_bboxes_list
def get_bboxes_list(end2end_result, structure_master_result):
    # end2end
    end2end_xyxy_list = []
    end2end_xywh_list = []
    for end2end_item in end2end_result:
        src_bbox = end2end_item['bbox']
        end2end_xyxy_list.append(src_bbox)
        xywh_bbox = xyxy2xywh(src_bbox)
        end2end_xywh_list.append(xywh_bbox)
    end2end_xyxy_bboxes = np.array(end2end_xyxy_list)
    end2end_xywh_bboxes = np.array(end2end_xywh_list)

    # structure master
    src_bboxes = structure_master_result['bbox']
    src_bboxes = remove_empty_bboxes(src_bboxes)
    # structure_master_xywh_bboxes = src_bboxes
    # xyxy_bboxes = xywh2xyxy(src_bboxes)
    # structure_master_xyxy_bboxes = xyxy_bboxes
    structure_master_xyxy_bboxes = src_bboxes
    xywh_bbox = xyxy2xywh(src_bboxes)
    structure_master_xywh_bboxes = xywh_bbox

    return end2end_xyxy_bboxes, end2end_xywh_bboxes, structure_master_xywh_bboxes, structure_master_xyxy_bboxes
ddaa2c2552e19635cd6cdf38619f1f176c358f89
10
table_master_match.py
159
add SLANet
4,761
0
143
93
37
24,519
64
PaddleOCR
18
ppstructure/table/table_master_match.py
Python
16
{ "docstring": "\n This function is use to convert end2end results and structure master results to\n List of xyxy bbox format and List of xywh bbox format\n :param end2end_result: bbox's format is xyxy\n :param structure_master_result: bbox's format is xywh\n :return: 4 kind list of bbox ()\n ", "language": "en", "n_whitespaces": 62, "n_words": 43, "vocab_size": 26 }
https://github.com/PaddlePaddle/PaddleOCR.git
6
make_modal
def make_modal(self):
    if not self._is_window_created('tried Window.make_modal'):
        return

    if running_mac() and ENABLE_MAC_MODAL_DISABLE_PATCH:
        return

    # if modal windows have been disabled globally
    if not DEFAULT_MODAL_WINDOWS_ENABLED:
        return

    try:
        self.TKroot.transient()
        self.TKroot.grab_set()
        self.TKroot.focus_force()
    except Exception as e:
        print('Exception trying to make modal', e)
ef3746cb06a9ee6bc93bc3c163ba961fd1b9c413
11
PySimpleGUI.py
116
set_options - new parm disable_modal_windows provides ability to disable modal setting for all windows including popups
53,310
0
164
63
32
212,647
38
PySimpleGUI
13
PySimpleGUI.py
Python
13
{ "docstring": "\n Makes a window into a \"Modal Window\"\n This means user will not be able to interact with other windows until this one is closed\n\n NOTE - Sorry Mac users - you can't have modal windows.... lobby your tkinter Mac devs\n ", "language": "en", "n_whitespaces": 69, "n_words": 40, "vocab_size": 37 }
https://github.com/PySimpleGUI/PySimpleGUI.git
1
set_interpret_parameters
def set_interpret_parameters(self, segments=8):
    self.interpretation_segments = segments
    return self
9e4541822770333ab5191bc01aa3edc9738f17ff
7
components.py
29
Blocks-Components - move audio component
43,002
0
29
17
8
179,703
8
gradio
4
gradio/components.py
Python
3
{ "docstring": "\n Calculates interpretation score of audio subsections by splitting the audio into subsections, then using a \"leave one out\" method to calculate the score of each subsection by removing the subsection and measuring the delta of the output value.\n Parameters:\n segments (int): Number of interpretation segments to split audio into.\n ", "language": "en", "n_whitespaces": 78, "n_words": 49, "vocab_size": 34 }
https://github.com/gradio-app/gradio.git
3
with_csv_dialect
def with_csv_dialect(name, **kwargs) -> Iterator[None]:
    import csv

    _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}

    if name in _BUILTIN_DIALECTS:
        raise ValueError("Cannot override builtin dialect.")

    csv.register_dialect(name, **kwargs)
    try:
        yield
    finally:
        csv.unregister_dialect(name)


@contextmanager
f538568afc2c76c2d738d32e3544cf9fe6742960
@contextmanager
10
contexts.py
103
TYP: misc return type annotations (#47558)
40,048
1
69
55
27
167,592
28
pandas
10
pandas/_testing/contexts.py
Python
28
{ "docstring": "\n Context manager to temporarily register a CSV dialect for parsing CSV.\n\n Parameters\n ----------\n name : str\n The name of the dialect.\n kwargs : mapping\n The parameters for the dialect.\n\n Raises\n ------\n ValueError : the name of the dialect conflicts with a builtin one.\n\n See Also\n --------\n csv : Python's CSV library.\n ", "language": "en", "n_whitespaces": 102, "n_words": 51, "vocab_size": 36 }
https://github.com/pandas-dev/pandas.git
6
adjust_legend_subtitles
def adjust_legend_subtitles(legend): # Legend title not in rcParams until 3.0 font_size = plt.rcParams.get("legend.title_fontsize", None) hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children() for hpack in hpackers: draw_area, text_area = hpack.get_children() handles = draw_area.get_children() if not all(artist.get_visible() for artist in handles): draw_area.set_width(0) for text in text_area.get_children(): if font_size is not None: text.set_size(font_size)
6460a21555ba6557e1f6f06f4d677d9c19148169
15
utils.py
165
Workaround for matplotlib rc_context issue (#2925) * Workaround for matplotlib rc_context issue Fixes #2914 * Add some additional comments about this workaround
7,477
0
138
100
34
42,077
46
seaborn
22
seaborn/utils.py
Python
11
{ "docstring": "\n Make invisible-handle \"subtitles\" entries look more like titles.\n\n Note: This function is not part of the public API and may be changed or removed.\n\n ", "language": "en", "n_whitespaces": 34, "n_words": 24, "vocab_size": 24 }
https://github.com/mwaskom/seaborn.git
2
_prepare_skip_target_masks
def _prepare_skip_target_masks(self):
    return [l is None for l in self.loss_functions]
84afc5193d38057e2e2badf9c889ea87d80d8fbf
8
training_v1.py
30
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,930
0
24
18
10
272,002
10
keras
4
keras/engine/training_v1.py
Python
2
{ "docstring": "Boolean mask for whether the target in the output list should be skipped.\n\n If the loss function corresponding to a model output is None, then this\n output will be skipped during total loss calculation and feed targets\n preparation.\n\n Returns:\n A boolean list for whether the corresponding target in the output list\n should be skipped during loss calculation.\n ", "language": "en", "n_whitespaces": 110, "n_words": 57, "vocab_size": 36 }
https://github.com/keras-team/keras.git
4
update_terminal
def update_terminal():
    poetry_hash = sha256sum("poetry.lock")

    completed_process = subprocess.run("git pull", shell=True, check=False)  # nosec
    if completed_process.returncode != 0:
        return completed_process.returncode

    new_poetry_hash = sha256sum("poetry.lock")

    if poetry_hash == new_poetry_hash:
        console.print("Great, seems like poetry hasn't been updated!")
        return completed_process.returncode
    console.print(
        "Seems like more modules have been added, grab a coke, this may take a while."
    )

    completed_process = subprocess.run(
        "poetry install", shell=True, check=False
    )  # nosec
    if completed_process.returncode != 0:
        return completed_process.returncode

    return 0
82747072c511beb1b2672846ae2ee4aec53eb562
10
terminal_helper.py
163
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: james <jmaslek11@gmail.com> Co-authored-by: jose-donato <zmcdonato@gmail.com>
83,890
0
150
94
46
281,593
70
OpenBBTerminal
12
gamestonk_terminal/terminal_helper.py
Python
18
{ "docstring": "Updates the terminal by running git pull in the directory. Runs poetry install if needed", "language": "en", "n_whitespaces": 15, "n_words": 15, "vocab_size": 14 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
test_serialize_post_error_mock
def test_serialize_post_error_mock(self, stream_mock):
    obj = QPoint()
    stream_mock.__lshift__.side_effect = lambda _other: self._set_status(
        stream_mock, QDataStream.Status.ReadCorruptData)

    with pytest.raises(OSError, match="The data stream has read corrupt "
                       "data."):
        qtutils.serialize_stream(stream_mock, obj)

    assert stream_mock.__lshift__.called_once_with(obj)
0877fb0d78635692e481c8bde224fac5ad0dd430
12
test_qtutils.py
108
Run scripts/dev/rewrite_enums.py
117,730
0
125
64
26
321,448
27
qutebrowser
19
tests/unit/utils/test_qtutils.py
Python
8
{ "docstring": "Test serialize_stream with an error while serializing.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/qutebrowser/qutebrowser.git
5
_validate_fill_value
def _validate_fill_value(self, value):
    dtype = self.dtype

    if isinstance(dtype, np.dtype) and dtype.kind not in ["m", "M"]:
        try:
            return np_can_hold_element(dtype, value)
        except ValueError as err:
            # re-raise as TypeError for consistency
            raise TypeError from err

    if not can_hold_element(self._values, value):
        raise TypeError

    return value
3977d7335f0792c012013e3b459a7950dfd31a7d
11
base.py
109
REF: consolidate _validate_fill_logic in np_can_hold_element (#45216)
39,405
0
154
67
33
163,219
41
pandas
13
pandas/core/indexes/base.py
Python
10
{ "docstring": "\n Check if the value can be inserted into our array without casting,\n and convert it to an appropriate native type if necessary.\n\n Raises\n ------\n TypeError\n If the value cannot be inserted into an array of this dtype.\n ", "language": "en", "n_whitespaces": 91, "n_words": 37, "vocab_size": 29 }
https://github.com/pandas-dev/pandas.git
7
__anext__
async def __anext__(self):
    if isinstance(self.iterator, Iterator):
        if not self._iterate_sync_in_thread:
5a0830cfb6bfa33dcffb38681f86efe5f6f0f97c
9
helper.py
39
refactor: avoid run in executor creating threads (#5518)
2,797
0
38
108
8
14,004
9
jina
6
jina/serve/stream/helper.py
Python
27
{ "docstring": "\n An `Iterator` indicates \"blocking\" code, which might block all tasks in the event loop.\n Hence we iterate in the default executor provided by asyncio.\n ", "language": "en", "n_whitespaces": 58, "n_words": 24, "vocab_size": 22 }
https://github.com/jina-ai/jina.git
6
__new__
def __new__(cls, name, bases, ns, total=True):
    # Create new typed dict class object.
    # This method is called directly when TypedDict is subclassed,
    # or via _typeddict_new when TypedDict is instantiated. This way
    # TypedDict supports all three syntaxes described in its docstring.
    # Subclasses and instances of TypedDict return actual dictionaries
    # via _dict_new.
    ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
    tp_dict = super().__new__(cls, name, (dict,), ns)

    annotations = {}
    own_annotations = ns.get('__annotations__', {})
    own_annotation_keys = set(own_annotations.keys())
    msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
    own_annotations = {
        n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
    }
    required_keys = set()
    optional_keys = set()

    for base in bases:
        annotations.update(base.__dict__.get('__annotations__', {}))
        required_keys.update(base.__dict__.get('__required_keys__', ()))
        optional_keys.update(base.__dict__.get('__optional_keys__', ()))

    annotations.update(own_annotations)
    if total:
        required_keys.update(own_annotation_keys)
    else:
        optional_keys.update(own_annotation_keys)

    tp_dict.__annotations__ = annotations
    tp_dict.__required_keys__ = frozenset(required_keys)
    tp_dict.__optional_keys__ = frozenset(optional_keys)
    if not hasattr(tp_dict, '__total__'):
        tp_dict.__total__ = total
    return tp_dict


__instancecheck__ = __subclasscheck__ = _check_fails


TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
TypedDict.__module__ = __name__
TypedDict.__doc__ = \
f3166e673fe8d40277b804d35d77dcdb760fc3b3
"""A simple typed name space. At runtimeequivalent to a plain dict. TypedDict creates a dictionary type that expects all ofto a plain dictinstances to have a certain set ofwith eachassociated with a value of a consistent type. This expectation is not checked at runtime but is only enforced by type checkers. Usage::a value of a consistent type. Thisat runtime butonly enforced by type
12
typing_extensions.py
495
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,614
9
565
221
116
20,914
162
pipenv
80
pipenv/patched/notpip/_vendor/typing_extensions.py
Python
27
{ "docstring": "A simple typed name space. At runtime it is equivalent to a plain dict.\n\n TypedDict creates a dictionary type that expects all of its\n instances to have a certain set of keys, with each key\n associated with a value of a consistent type. This expectation\n is not checked at runtime but is only enforced by type checkers.\n Usage::\n", "language": "en", "n_whitespaces": 92, "n_words": 58, "vocab_size": 46 }
https://github.com/pypa/pipenv.git
7
_set_dependencies
def _set_dependencies(self, analysis, path):
    for toc in (analysis.binaries, analysis.datas):
        for i, tpl in enumerate(toc):
            if not tpl[1] in self._dependencies:
                logger.debug("Adding dependency %s located in %s", tpl[1], path)
                self._dependencies[tpl[1]] = path
            else:
                dep_path = self._get_relative_path(path, self._dependencies[tpl[1]])
                # Ignore references that point to the origin package. This can happen if the same resource is listed
                # multiple times in TOCs (e.g., once as binary and once as data).
                if dep_path.endswith(path):
                    logger.debug(
                        "Ignoring self-reference of %s for %s, located in %s - duplicated TOC entry?",
                        tpl[1], path, dep_path
                    )
                    # Clear the entry as it is a duplicate.
                    toc[i] = (None, None, None)
                    continue
                logger.debug("Referencing %s to be a dependency for %s, located in %s", tpl[1], path, dep_path)
                # Determine the path relative to dep_path (i.e, within the target directory) from the 'name'
                # component of the TOC tuple.
                rel_path = os.path.dirname(tpl[0])
                # Take filename from 'path' (second component of TOC tuple); this way, we don't need to worry about
                # suffix of extensions.
                filename = os.path.basename(tpl[1])
                # Construct the full file path relative to dep_path...
                filename = os.path.join(rel_path, filename)
                # ...and use it in new DEPENDENCY entry
                analysis.dependencies.append((":".join((dep_path, filename)), tpl[1], "DEPENDENCY"))
                toc[i] = (None, None, None)

        # Clean the list
        toc[:] = [tpl for tpl in toc if tpl != (None, None, None)]

# TODO: use pathlib.Path.relative_to() instead.
04984a040c2396127f234518f783cbed088408bb
18
api.py
365
building: move filename processing of EXTENSION entries to analysis stage Move filename processing of EXTENSION TOC entries (i.e., converting the module name to file path and adding the suffix) from the build stage (i.e., `assemble` in `PKG`, `COLLECT`, and `BUNDLE`) into analysis stage. This ensures that during the build stage, the EXTENSION entries in the TOC are already full filenames, same as other file-based entries (DATA, BINARY, etc.). This in turn means that the `add_suffix_to_extension` helper does not need to worry about DEPENDENCY entries anymore, and can process only EXTENSION ones, as implied by its name. Early conversion of EXTENSION module names to file names also prevents duplication when the same file is collected as both an EXTENSION and some other type, for example DATA: ``` ('torch._C', '...\\site-packages\\torch\\_C.cp39-win_amd64.pyd', 'EXTENSION'), ('torch\\_C.cp39-win_amd64.pyd', '...\\site-pakages\\torch\\_C.cp39-win_amd64.pyd', 'DATA'), ``` Prior to this commit, the entries were considered different from the `TOC` perspective, but led to duplication in onefile build's PKG once extension's name was changed to the file name (whereas in onedir build, the first entry was overwritten by the second).
77,548
0
807
237
129
263,997
216
pyinstaller
24
PyInstaller/building/api.py
Python
22
{ "docstring": "\n Synchronize the Analysis result with the needed dependencies.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 7 }
https://github.com/pyinstaller/pyinstaller.git
3
break_up_by_substrings
def _break_up_by_substrings(self):
    new_submobjects = []
    curr_index = 0
    for tex_string in self.tex_strings:
        sub_tex_mob = SingleStringMathTex(
            tex_string,
            tex_environment=self.tex_environment,
            tex_template=self.tex_template,
        )
        num_submobs = len(sub_tex_mob.submobjects)
        new_index = (
            curr_index + num_submobs + len("".join(self.arg_separator.split()))
        )
        if num_submobs == 0:
            # For cases like empty tex_strings, we want the corresponding
            # part of the whole MathTex to be a VectorizedPoint
            # positioned in the right part of the MathTex
            sub_tex_mob.submobjects = [VectorizedPoint()]
            last_submob_index = min(curr_index, len(self.submobjects) - 1)
            sub_tex_mob.move_to(self.submobjects[last_submob_index], RIGHT)
        else:
            sub_tex_mob.submobjects = self.submobjects[curr_index:new_index]
        new_submobjects.append(sub_tex_mob)
        curr_index = new_index
    self.submobjects = new_submobjects
    return self
902e7eb4f0147b5882a613b67467e38a1d47f01e
17
tex_mobject.py
230
Hide more private methods from the docs. (#2468) * hide privs from text_mobject.py * hide privs from tex_mobject.py * hide privs from code_mobject.py * hide privs from svg_mobject.py * remove SVGPath and utils from __init__.py * don't import string_to_numbers * hide privs from geometry.py * hide privs from matrix.py * hide privs from numbers.py * hide privs from three_dimensions.py * forgot underscore under set_stroke_width_from_length * there were more i missed * unhidea method that was used in docs * forgot other text2hash * remove svg_path from docs
46,082
0
394
142
61
189,478
88
manim
23
manim/mobject/svg/tex_mobject.py
Python
23
{ "docstring": "\n Reorganize existing submobjects one layer\n deeper based on the structure of tex_strings (as a list\n of tex_strings)\n ", "language": "en", "n_whitespaces": 46, "n_words": 17, "vocab_size": 16 }
https://github.com/ManimCommunity/manim.git
2
delimitedList
def delimitedList(expr, delim=",", combine=False):
    dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..."
    if combine:
        return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName)
    else:
        return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
17
pyparsing.py
132
upd; format
13,253
0
60
77
21
63,322
34
transferlearning
10
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
Python
6
{ "docstring": "Helper to define a delimited list of expressions - the delimiter\n defaults to ','. By default, the list elements and delimiters can\n have intervening whitespace, and comments, but this can be\n overridden by passing ``combine=True`` in the constructor. If\n ``combine`` is set to ``True``, the matching tokens are\n returned as a single token string, with the delimiters included;\n otherwise, the matching tokens are returned as a list of tokens,\n with the delimiters suppressed.\n\n Example::\n\n delimitedList(Word(alphas)).parseString(\"aa,bb,cc\") # -> ['aa', 'bb', 'cc']\n delimitedList(Word(hexnums), delim=':', combine=True).parseString(\"AA:BB:CC:DD:EE\") # -> ['AA:BB:CC:DD:EE']\n ", "language": "en", "n_whitespaces": 127, "n_words": 86, "vocab_size": 61 }
https://github.com/jindongwang/transferlearning.git
2
_gcd_import
def _gcd_import(name, package=None, level=0):
    _sanity_check(name, package, level)
    if level > 0:
        name = _resolve_name(name, package, level)
    return _find_and_load(name, _gcd_import)
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
_bootstrap.py
66
add python 3.10.4 for windows
55,125
0
38
44
17
218,095
19
XX-Net
7
python3.10.4/Lib/importlib/_bootstrap.py
Python
5
{ "docstring": "Import and return the module based on its name, the package the call is\n being made from, and the level adjustment.\n\n This function represents the greatest common denominator of functionality\n between import_module and __import__. This includes setting __package__ if\n the loader did not.\n\n ", "language": "en", "n_whitespaces": 58, "n_words": 43, "vocab_size": 35 }
https://github.com/XX-net/XX-Net.git
1
get_transactions
def get_transactions(self):
    df = self.__transactions[
        [
            "Date",
            "Type",
            "Ticker",
            "Side",
            "Price",
            "Quantity",
            "Fees",
            "Investment",
            "Currency",
            "Sector",
            "Industry",
            "Country",
            "Region",
        ]
    ]

    df = df.replace(np.nan, "-")
    df["Date"] = df["Date"].dt.strftime("%Y-%m-%d")
    df.sort_values(by="Date", ascending=False, inplace=True)

    return df
8e9e6bd57f4bc5d57ccedfacccda6342d5881266
11
portfolio_model.py
157
Incorporate portfolio class into SDK (#3401) * create functions to interact with portfolio * fix some docstrings * view docstrings * make portfolio loading available in sdk * reorder some methods * fix bug * update controller * update website * remove import * change input name * regenerate website * change portfolio arg name * fix metrics bugs * fix report * refactor assets alloc * refactor assets sectors alloc * remove unecessary attributes * refactor allocaasset sector * reorganize class * first refactor alloc * refactor portfolio alloc * black * fix alloc bug * regenerate sdk website * fix alloc bugs * forgot this exception * some refactor on portfolio alloc country region * fix some allocation bugs * add examples * regenerate website Co-authored-by: James Maslek <jmaslek11@gmail.com>
85,861
0
299
87
28
286,538
33
OpenBBTerminal
13
openbb_terminal/portfolio/portfolio_model.py
Python
22
{ "docstring": "Get formatted transactions\n\n Returns\n -------\n pd.DataFrame: formatted transactions\n ", "language": "en", "n_whitespaces": 40, "n_words": 8, "vocab_size": 6 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
get_global_params
def get_global_params():
    GlobalParams = namedtuple('GlobalParams', [
        'drop_connect_rate', 'width_coefficient', 'depth_coefficient',
        'depth_divisor', 'image_size'
    ])
    global_params = GlobalParams(
        drop_connect_rate=0.3,
        width_coefficient=1.2,
        depth_coefficient=1.4,
        depth_divisor=8,
        image_size=64)
    return global_params
6e607a0fa1cefbf0388dac86c84debf4781cec48
10
rec_efficientb3_pren.py
83
[Feature] Add PREN Scene Text Recognition Model(Accepted in CVPR2021) (#5563) * [Feature] add PREN scene text recognition model * [Patch] Optimize yml File * [Patch] Save Label/Pred Preprocess Time Cost * [BugFix] Modify Shape Conversion to Fit for Inference Model Exportion * [Patch] ? * [Patch] ? * 啥情况...
4,585
0
134
55
20
23,379
22
PaddleOCR
9
ppocr/modeling/backbones/rec_efficientb3_pren.py
Python
12
{ "docstring": "\n The fllowing are efficientnetb3's arch superparams, but to fit for scene \n text recognition task, the resolution(image_size) here is changed \n from 300 to 64.\n ", "language": "en", "n_whitespaces": 54, "n_words": 23, "vocab_size": 22 }
https://github.com/PaddlePaddle/PaddleOCR.git
1
get_extra_addanother_params
def get_extra_addanother_params(self, request, params):
    return {}


#
# Request handlers
#
8b1a462a6070cb6054af8bb59589c9a2e785afc2
6
object_views.py
25
#10094 changes from code review
78,139
0
34
13
9
265,553
11
netbox
4
netbox/netbox/views/generic/object_views.py
Python
2
{ "docstring": "\n Return a dictionary of extra parameters to use on the Add Another button.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 13 }
https://github.com/netbox-community/netbox.git
2
_ensure_html_header
def _ensure_html_header(response):
    # type: (Response) -> None
    content_type = response.headers.get("Content-Type", "")
    if not content_type.lower().startswith("text/html"):
        raise _NotHTML(content_type, response.request.method)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
11
collector.py
76
upd; format
12,262
0
36
42
17
60,723
17
transferlearning
10
.venv/lib/python3.8/site-packages/pip/_internal/index/collector.py
Python
4
{ "docstring": "Check the Content-Type header to ensure the response contains HTML.\n\n Raises `_NotHTML` if the content type is not text/html.\n ", "language": "en", "n_whitespaces": 25, "n_words": 19, "vocab_size": 17 }
https://github.com/jindongwang/transferlearning.git
1
test_get_next_txn
def test_get_next_txn(self) -> None:
    # Prefill table with 7 rows written by 'master'
    self._insert_rows("master", 7)

    id_gen = self._create_id_generator()

    self.assertEqual(id_gen.get_positions(), {"master": 7})
    self.assertEqual(id_gen.get_current_token_for_writer("master"), 7)

    # Try allocating a new ID gen and check that we only see position
    # advanced after we leave the context manager.
9d21ecf7ceab55bc19c4457b8b07401b0b1623a7
10
test_id_generators.py
94
Add type hints to tests files. (#12256)
71,924
0
101
98
41
247,791
45
synapse
8
tests/storage/test_id_generators.py
Python
10
{ "docstring": "Test that the `get_next_txn` function works correctly.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/matrix-org/synapse.git
11
sanitize_index
def sanitize_index(ind):
    from dask.array.utils import asanyarray_safe

    if ind is None:
        return None
    elif isinstance(ind, slice):
        return slice(
            _sanitize_index_element(ind.start),
            _sanitize_index_element(ind.stop),
            _sanitize_index_element(ind.step),
        )
    elif isinstance(ind, Number):
        return _sanitize_index_element(ind)
    elif is_dask_collection(ind):
        return ind
    index_array = asanyarray_safe(ind, like=ind)
    if index_array.dtype == bool:
        nonzero = np.nonzero(index_array)
        if len(nonzero) == 1:
            # If a 1-element tuple, unwrap the element
            nonzero = nonzero[0]
        if is_arraylike(nonzero):
            return nonzero
        else:
            return np.asanyarray(nonzero)
    elif np.issubdtype(index_array.dtype, np.integer):
        return index_array
    elif np.issubdtype(index_array.dtype, np.floating):
        int_index = index_array.astype(np.intp)
        if np.allclose(index_array, int_index):
            return int_index
        else:
            check_int = np.isclose(index_array, int_index)
            first_err = index_array.ravel()[np.flatnonzero(~check_int)[0]]
            raise IndexError("Bad index. Must be integer-like: %s" % first_err)
    else:
        raise TypeError("Invalid index type", type(ind), ind)
cccb9d8d8e33a891396b1275c2448c352ef40c27
17
slicing.py
371
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
36,528
0
356
235
72
156,063
103
dask
38
dask/array/slicing.py
Python
35
{ "docstring": "Sanitize the elements for indexing along one axis\n\n >>> sanitize_index([2, 3, 5])\n array([2, 3, 5])\n >>> sanitize_index([True, False, True, False])\n array([0, 2])\n >>> sanitize_index(np.array([1, 2, 3]))\n array([1, 2, 3])\n >>> sanitize_index(np.array([False, True, True]))\n array([1, 2])\n >>> type(sanitize_index(np.int32(0)))\n <class 'int'>\n >>> sanitize_index(1.0)\n 1\n >>> sanitize_index(0.5)\n Traceback (most recent call last):\n ...\n IndexError: Bad index. Must be integer-like: 0.5\n ", "language": "en", "n_whitespaces": 109, "n_words": 57, "vocab_size": 45 }
https://github.com/dask/dask.git
2
test_process_messages_cardinality_limited
def test_process_messages_cardinality_limited(caplog, settings, monkeypatch) -> None:
    settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0
c48fda09e252018a4d2b831bb84e1c68a739c085
7
test_multiprocess_steps.py
28
feat(metrics): Add cardinality limiter to indexer [sns-1651] (#38428) Reopen of https://github.com/getsentry/sentry/pull/38302 to avoid notification spam See #38257 and https://www.notion.so/sentry/Metrics-Dimensionality-Limiting-df010a6a6d4e467ca3c5c19230db862b#4966fb9c07fc4394b720ad161c99a096. This is just the glue code and configuration options for using the cardinality limiter in the indexer. The actual implementation is TBD. This is safe to merge because the stub implementation does not actually limit anything at all, so it should be fast enough to do synchronously for now ## rollout plan - [x] https://github.com/getsentry/sentry/pull/38446 - [x] set options to nothing in prod - [ ] merge + deploy this PR - [ ] check prod metrics: redis should not be used - [ ] https://github.com/getsentry/sentry/pull/38445 - [ ] check prod metrics: redis should still not be used - [ ] run qe tests? - [ ] get a redis cluster and configure it - [ ] run use_quota on a separate thread - [ ] set a quota - [ ] get rid of writes limiter? - [ ] stop indexing tag values Co-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com> Co-authored-by: Nikhar Saxena <nikhar.saxena@sentry.io>
18,024
0
15
170
9
85,651
9
sentry
5
tests/sentry/sentry_metrics/test_multiprocess_steps.py
Python
28
{ "docstring": "\n Test that the message processor correctly calls the cardinality limiter.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 9 }
https://github.com/getsentry/sentry.git
2
dag_id
def dag_id(self) -> str:
    if self.dag:
        return self.dag.dag_id
    return "_in_memory_dag_"
34154803ac73d62d3e969e480405df3073032622
9
taskmixin.py
38
Show tasks in grid view based on topological sort. (#22741) This takes the existing topological sort that existed on a DAG and moves it down to TaskGroup. In order to do this (and not have duplicated sort) the existing sort on DAG is re-implemented on top of the new method. This also surfaced a tiny bug in deserialize_task_group where the SerializedTaskGroup did not have `dag` set -- it didn't cause any problems until now but was needed to call `upstream_list` on a SerializedTaskGroup object.
9,007
0
42
21
9
46,825
10
airflow
4
airflow/models/taskmixin.py
Python
5
{ "docstring": "Returns dag id if it has one or an adhoc/meaningless ID", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/apache/airflow.git
1
compress
def compress(summary, epsilon):
    # TODO(b/184863356): remove the numpy escape hatch here.
    return tf.numpy_function(
        lambda s: _compress_summary_numpy(s, epsilon), [summary], tf.float32
    )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
10
discretization.py
49
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,077
0
39
31
20
272,922
20
keras
8
keras/layers/preprocessing/discretization.py
Python
4
{ "docstring": "Compress a summary to within `epsilon` accuracy.\n\n The compression step is needed to keep the summary sizes small after merging,\n and also used to return the final target boundaries. It finds the new bins\n based on interpolating cumulative weight percentages from the large summary.\n Taking the difference of the cumulative weights from the previous bin's\n cumulative weight will give the new weight for that bin.\n\n Args:\n summary: 2D `np.ndarray` summary to be compressed.\n epsilon: A `'float32'` that determines the approxmiate desired precision.\n\n Returns:\n A 2D `np.ndarray` that is a compressed summary. First column is the\n interpolated partition values, the second is the weights (counts).\n ", "language": "en", "n_whitespaces": 156, "n_words": 104, "vocab_size": 71 }
https://github.com/keras-team/keras.git
2
Head
def Head(num_classes=1000, name=None):
    if name is None:
        name = str(backend.get_uid("head"))
2d1086447a25d281f9428832d046c473d80ad761
13
convnext.py
49
Corrected preprocess_input docstring in regnet.py and convnext.py
79,999
0
23
32
9
269,278
10
keras
6
keras/applications/convnext.py
Python
5
{ "docstring": "Implementation of classification head of RegNet.\n\n Args:\n num_classes: number of classes for Dense layer\n name: name prefix\n\n Returns:\n Classification head function.\n ", "language": "en", "n_whitespaces": 33, "n_words": 21, "vocab_size": 18 }
https://github.com/keras-team/keras.git
1
require_tpu
def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)
e5c17f36a8b5bf8b9478d416c4a80841a353fb19
10
testing.py
37
Clean up tests + fix import (#330)
121,064
0
13
20
7
337,462
7
accelerate
5
src/accelerate/test_utils/testing.py
Python
2
{ "docstring": "\n Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available.\n ", "language": "en", "n_whitespaces": 24, "n_words": 17, "vocab_size": 16 }
https://github.com/huggingface/accelerate.git
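As an illustrative aside to the `require_tpu` record above, here is a minimal sketch of how such a skip decorator would typically be applied in a unittest suite; the test class and body are hypothetical, only the decorator and import path come from the record.

import unittest
from accelerate.test_utils.testing import require_tpu


class ExampleSuite(unittest.TestCase):
    @require_tpu
    def test_runs_only_on_tpu(self):
        # Skipped automatically when is_tpu_available() returns False.
        self.assertTrue(True)
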
6
_identify_infrequent
def _identify_infrequent(self, category_count, n_samples, col_idx):
    if isinstance(self.min_frequency, numbers.Integral):
        infrequent_mask = category_count < self.min_frequency
    elif isinstance(self.min_frequency, numbers.Real):
        min_frequency_abs = n_samples * self.min_frequency
        infrequent_mask = category_count < min_frequency_abs
    else:
        infrequent_mask = np.zeros(category_count.shape[0], dtype=bool)

    n_current_features = category_count.size - infrequent_mask.sum() + 1
    if self.max_categories is not None and self.max_categories < n_current_features:
        # stable sort to preserve original count order
        smallest_levels = np.argsort(category_count, kind="mergesort")[
            : -self.max_categories + 1
        ]
        infrequent_mask[smallest_levels] = True

    output = np.flatnonzero(infrequent_mask)
    return output if output.size > 0 else None
7f0006c8aad1a09621ad19c3db19c3ff0555a183
13
_encoders.py
227
ENH Adds infrequent categories to OneHotEncoder (#16018) * ENH Completely adds infrequent categories * STY Linting * STY Linting * DOC Improves wording * DOC Lint * BUG Fixes * CLN Address comments * CLN Address comments * DOC Uses math to description float min_frequency * DOC Adds comment regarding drop * BUG Fixes method name * DOC Clearer docstring * TST Adds more tests * FIX Fixes mege * CLN More pythonic * CLN Address comments * STY Flake8 * CLN Address comments * DOC Fix * MRG * WIP * ENH Address comments * STY Fix * ENH Use functiion call instead of property * ENH Adds counts feature * CLN Rename variables * DOC More details * CLN Remove unneeded line * CLN Less lines is less complicated * CLN Less diffs * CLN Improves readiabilty * BUG Fix * CLN Address comments * TST Fix * CLN Address comments * CLN Address comments * CLN Move docstring to userguide * DOC Better wrapping * TST Adds test to handle_unknown='error' * ENH Spelling error in docstring * BUG Fixes counter with nan values * BUG Removes unneeded test * BUG Fixes issue * ENH Sync with main * DOC Correct settings * DOC Adds docstring * DOC Immprove user guide * DOC Move to 1.0 * DOC Update docs * TST Remove test * DOC Update docstring * STY Linting * DOC Address comments * ENH Neater code * DOC Update explaination for auto * Update sklearn/preprocessing/_encoders.py Co-authored-by: Roman Yurchak <rth.yurchak@gmail.com> * TST Uses docstring instead of comments * TST Remove call to fit * TST Spelling error * ENH Adds support for drop + infrequent categories * ENH Adds infrequent_if_exist option * DOC Address comments for user guide * DOC Address comments for whats_new * DOC Update docstring based on comments * CLN Update test with suggestions * ENH Adds computed property infrequent_categories_ * DOC Adds where the infrequent column is located * TST Adds more test for infrequent_categories_ * DOC Adds docstring for _compute_drop_idx * CLN Moves _convert_to_infrequent_idx into its own method * TST Increases test coverage * TST Adds failing test * CLN Careful consideration of dropped and inverse_transform * STY Linting * DOC Adds docstrinb about dropping infrequent * DOC Uses only * DOC Numpydoc * TST Includes test for get_feature_names_out * DOC Move whats new * DOC Address docstring comments * DOC Docstring changes * TST Better comments * TST Adds check for handle_unknown='ignore' for infrequent * CLN Make _infrequent_indices private * CLN Change min_frequency default to None * DOC Adds comments * ENH adds support for max_categories=1 * ENH Describe lexicon ordering for ties * DOC Better docstring * STY Fix * CLN Error when explicity dropping an infrequent category * STY Grammar Co-authored-by: Joel Nothman <joel.nothman@gmail.com> Co-authored-by: Roman Yurchak <rth.yurchak@gmail.com> Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
75,639
0
237
146
56
259,201
78
scikit-learn
26
sklearn/preprocessing/_encoders.py
Python
16
{ "docstring": "Compute the infrequent indices.\n\n Parameters\n ----------\n category_count : ndarray of shape (n_cardinality,)\n Category counts.\n\n n_samples : int\n Number of samples.\n\n col_idx : int\n Index of the current category. Only used for the error message.\n\n Returns\n -------\n output : ndarray of shape (n_infrequent_categories,) or None\n If there are infrequent categories, indices of infrequent\n categories. Otherwise None.\n ", "language": "en", "n_whitespaces": 173, "n_words": 55, "vocab_size": 41 }
https://github.com/scikit-learn/scikit-learn.git
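A hedged usage sketch of the public behaviour this private helper supports (grouping rare categories), assuming a scikit-learn release that exposes `min_frequency` and `infrequent_categories_` on `OneHotEncoder`; the sample data is made up.

import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([["cat"], ["cat"], ["dog"], ["dog"], ["snake"]])
enc = OneHotEncoder(min_frequency=2, handle_unknown="infrequent_if_exist")
enc.fit(X)
print(enc.infrequent_categories_)  # "snake" occurs fewer than 2 times, so it is grouped
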
1
test_port_editor
def test_port_editor(self, mock_unity3d):
    _ = Unity3DEnv(port=None)
    args, kwargs = mock_unity3d.call_args
    mock_unity3d.assert_called_once()
    self.assertEqual(5004, kwargs.get("base_port"))
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
10
test_unity3d_env.py
70
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
32,988
0
48
41
12
143,428
13
ray
12
rllib/env/wrappers/tests/test_unity3d_env.py
Python
5
{ "docstring": "Test if the environment uses the editor port\n when no environment file is provided", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 12 }
https://github.com/ray-project/ray.git
5
get_create_field_function
def get_create_field_function(self, type):
    create_field_function = getattr(self, "create_%s_field" % type, None)
    if create_field_function:
        return create_field_function
    else:
        import inspect

        method_list = [
            f[0]
            for f in inspect.getmembers(self.__class__, inspect.isfunction)
            if f[0].startswith("create_") and f[0].endswith("_field")
        ]
        raise AttributeError(
            "Could not find function matching format \
            create_<fieldname>_field for type: "
            + type,
            "Must be one of: " + ", ".join(method_list),
        )
d10f15e55806c6944827d801cd9c2d53f5da4186
15
forms.py
149
Reformat with black
15,920
0
254
89
48
72,986
55
wagtail
15
wagtail/contrib/forms/forms.py
Python
17
{ "docstring": "\n Takes string of field type and returns a Django Form Field Instance.\n Assumes form field creation functions are in the format:\n 'create_fieldtype_field'\n ", "language": "en", "n_whitespaces": 51, "n_words": 22, "vocab_size": 21 }
https://github.com/wagtail/wagtail.git
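A small self-contained sketch of the lookup-by-naming-convention pattern the record above relies on; the class and field type names here are hypothetical and are not Wagtail's own.

class FieldFactory:
    def create_singleline_field(self, options):
        return {"type": "singleline", **options}

    def get_create_field_function(self, type):
        fn = getattr(self, "create_%s_field" % type, None)
        if fn is None:
            raise AttributeError("No factory method for type: %s" % type)
        return fn


factory = FieldFactory()
print(factory.get_create_field_function("singleline")({"label": "Name"}))
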
1
image_entity
def image_entity(props):
    return DOM.create_element(
        "embed",
        {
            "embedtype": "image",
            "format": props.get("format"),
            "id": props.get("id"),
            "alt": props.get("alt"),
        },
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
12
contentstate.py
91
Reformat with black
16,356
0
90
48
16
75,104
16
wagtail
5
wagtail/images/rich_text/contentstate.py
Python
10
{ "docstring": "\n Helper to construct elements of the form\n <embed alt=\"Right-aligned image\" embedtype=\"image\" format=\"right\" id=\"1\"/>\n when converting from contentstate data\n ", "language": "en", "n_whitespaces": 31, "n_words": 18, "vocab_size": 18 }
https://github.com/wagtail/wagtail.git
5
_linab
def _linab(arg, symbol):
    arg = factor_terms(arg.expand())
    ind, dep = arg.as_independent(symbol)
    if arg.is_Mul and dep.is_Add:
        a, b, x = _linab(dep, symbol)
        return ind*a, ind*b, x
    if not arg.is_Add:
        b = 0
        a, x = ind, dep
    else:
        b = ind
        a, x = separatevars(dep).as_independent(symbol, as_Add=False)
    if x.could_extract_minus_sign():
        a = -a
        x = -x
    return a, b, x
59d22b6bb7287613d598611027f640d068ca5748
13
bivariate.py
188
Moved imports to higher level
47,919
0
136
118
33
196,419
56
sympy
16
sympy/solvers/bivariate.py
Python
16
{ "docstring": "Return ``a, b, X`` assuming ``arg`` can be written as ``a*X + b``\n where ``X`` is a symbol-dependent factor and ``a`` and ``b`` are\n independent of ``symbol``.\n\n Examples\n ========\n\n >>> from sympy.solvers.bivariate import _linab\n >>> from sympy.abc import x, y\n >>> from sympy import exp, S\n >>> _linab(S(2), x)\n (2, 0, 1)\n >>> _linab(2*x, x)\n (2, 0, x)\n >>> _linab(y + y*x + 2*x, x)\n (y + 2, y, x)\n >>> _linab(3 + 2*exp(x), x)\n (2, 3, exp(x))\n ", "language": "en", "n_whitespaces": 126, "n_words": 78, "vocab_size": 55 }
https://github.com/sympy/sympy.git
6
__getattr__
def __getattr__(self, name):
    attr = None
    if name.startswith('do_'):
        module = name.replace('do_', '')
        if module_loader.find_plugin(module):
            setattr(self, name, lambda arg, module=module: self.default(module + ' ' + arg))
            attr = object.__getattr__(self, name)
    elif name.startswith('help_'):
        module = name.replace('help_', '')
        if module_loader.find_plugin(module):
            setattr(self, name, lambda module=module: self.helpdefault(module))
            attr = object.__getattr__(self, name)

    if attr is None:
        raise AttributeError(f"{self.__class__} does not have a {name} attribute")

    return attr
34f8168afc1d7047c47adec3730c591a58f4f899
17
console.py
240
ansible-console fixes (#78064) * list collection task actions too * dynamically add execute/help functions when module is found * handle redirection and short names
78,939
0
217
138
38
267,520
60
ansible
16
lib/ansible/cli/console.py
Python
15
{ "docstring": " handle not found to populate dynamically a module function if module matching name exists ", "language": "en", "n_whitespaces": 15, "n_words": 14, "vocab_size": 13 }
https://github.com/ansible/ansible.git
1
test_blacklisted_ip_specific
def test_blacklisted_ip_specific(self) -> None:
    self.lookups["example.com"] = [(IPv4Address, "192.168.1.1")]

    channel = self.make_request(
        "GET", "preview_url?url=http://example.com", shorthand=False
    )

    # No requests made.
    self.assertEqual(len(self.reactor.tcpClients), 0)
    self.assertEqual(channel.code, 502)
    self.assertEqual(
        channel.json_body,
        {
            "errcode": "M_UNKNOWN",
            "error": "DNS resolution failure during URL preview generation",
        },
    )
32c828d0f760492711a98b11376e229d795fd1b3
11
test_url_preview.py
139
Add type hints to `tests/rest`. (#12208) Co-authored-by: Patrick Cloke <clokep@users.noreply.github.com>
71,729
0
175
81
36
247,541
38
synapse
13
tests/rest/media/v1/test_url_preview.py
Python
17
{ "docstring": "\n Blacklisted IP addresses, found via DNS, are not spidered.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
https://github.com/matrix-org/synapse.git
1
verbatim
def verbatim(parser, token):
    nodelist = parser.parse(("endverbatim",))
    parser.delete_first_token()
    return VerbatimNode(nodelist.render(Context()))


@register.tag
9c19aff7c7561e3a82978a272ecdaad40dda5c00
@register.tag
11
defaulttags.py
70
Refs #33476 -- Reformatted code with Black.
51,443
1
21
36
10
206,254
10
django
11
django/template/defaulttags.py
Python
4
{ "docstring": "\n Stop the template engine from rendering the contents of this block tag.\n\n Usage::\n\n {% verbatim %}\n {% don't process this %}\n {% endverbatim %}\n\n You can also designate a specific closing tag block (allowing the\n unrendered use of ``{% endverbatim %}``)::\n\n {% verbatim myblock %}\n ...\n {% endverbatim myblock %}\n ", "language": "en", "n_whitespaces": 116, "n_words": 50, "vocab_size": 33 }
https://github.com/django/django.git
3
test_large_batch_mixed_efficiency
def test_large_batch_mixed_efficiency(self):
    with override_settings(DEBUG=True):
        connection.queries_log.clear()
        TwoFields.objects.bulk_create(
            [
                TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1)
                for i in range(100000, 101000)
            ]
        )
        self.assertLess(len(connection.queries), 10)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
16
tests.py
120
Refs #33476 -- Reformatted code with Black.
50,024
0
155
76
28
201,942
29
django
18
tests/bulk_create/tests.py
Python
10
{ "docstring": "\n Test inserting a large batch with objects having primary key set\n mixed together with objects without PK set.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 16 }
https://github.com/django/django.git
1
test_subdag_pools
def test_subdag_pools(self):
    dag = DAG('parent', default_args=default_args)
    subdag = DAG('parent.child', default_args=default_args)

    session = airflow.settings.Session()
    pool_1 = airflow.models.Pool(pool='test_pool_1', slots=1)
    pool_10 = airflow.models.Pool(pool='test_pool_10', slots=10)
    session.add(pool_1)
    session.add(pool_10)
    session.commit()

    EmptyOperator(task_id='dummy', dag=subdag, pool='test_pool_1')

    with pytest.raises(AirflowException):
        SubDagOperator(task_id='child', dag=dag, subdag=subdag, pool='test_pool_1')

    # recreate dag because failed subdagoperator was already added
    dag = DAG('parent', default_args=default_args)
    SubDagOperator(task_id='child', dag=dag, subdag=subdag, pool='test_pool_10')

    session.delete(pool_1)
    session.delete(pool_10)
    session.commit()
49e336ae0302b386a2f47269a6d13988382d975f
11
test_subdag_operator.py
287
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
9,191
0
183
169
38
47,650
53
airflow
25
tests/operators/test_subdag_operator.py
Python
17
{ "docstring": "\n Subdags and subdag tasks can't both have a pool with 1 slot\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
https://github.com/apache/airflow.git
6
test_loss_of_perfect_prediction
def test_loss_of_perfect_prediction(loss, sample_weight):
    if not loss.is_multiclass:
        # Use small values such that exp(value) is not nan.
        raw_prediction = np.array([-10, -0.1, 0, 0.1, 3, 10])
        # If link is identity, we must respect the interval of y_pred:
        if isinstance(loss.link, IdentityLink):
            eps = 1e-10
            low = loss.interval_y_pred.low
            if not loss.interval_y_pred.low_inclusive:
                low = low + eps
            high = loss.interval_y_pred.high
            if not loss.interval_y_pred.high_inclusive:
                high = high - eps
            raw_prediction = np.clip(raw_prediction, low, high)
        y_true = loss.link.inverse(raw_prediction)
    else:
        # HalfMultinomialLoss
        y_true = np.arange(loss.n_classes).astype(float)
        # raw_prediction with entries -exp(10), but +exp(10) on the diagonal
        # this is close enough to np.inf which would produce nan
        raw_prediction = np.full(
            shape=(loss.n_classes, loss.n_classes),
            fill_value=-np.exp(10),
            dtype=float,
        )
        raw_prediction.flat[:: loss.n_classes + 1] = np.exp(10)

    if sample_weight == "range":
        sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])

    loss_value = loss.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
    )
    constant_term = loss.constant_to_optimal_zero(
        y_true=y_true, sample_weight=sample_weight
    )
    # Comparing loss_value + constant_term to zero would result in large
    # round-off errors.
    assert_allclose(loss_value, -constant_term, atol=1e-14, rtol=1e-15)


@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) @pytest.mark.parametrize("sample_weight", [None, "range"])
15
test_loss.py
446
ENH migrate GLMs / TweedieRegressor to linear loss (#22548) Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com>
75,769
1
438
266
110
259,435
159
scikit-learn
43
sklearn/_loss/tests/test_loss.py
Python
32
{ "docstring": "Test value of perfect predictions.\n\n Loss of y_pred = y_true plus constant_to_optimal_zero should sums up to\n zero.\n ", "language": "en", "n_whitespaces": 26, "n_words": 17, "vocab_size": 16 }
https://github.com/scikit-learn/scikit-learn.git
2
set_cmap
def set_cmap(cmap):
    cmap = colormaps[cmap]

    rc('image', cmap=cmap.name)
    im = gci()

    if im is not None:
        im.set_cmap(cmap)


@_copy_docstring_and_deprecators(matplotlib.image.imread)
a17f4f3bd63e3ca3754f96d7db4ce5197720589b
@_copy_docstring_and_deprecators(matplotlib.image.imread)
9
pyplot.py
82
MNT: convert tests and internal usage way from using mpl.cm.get_cmap
23,562
1
38
39
15
109,381
17
matplotlib
11
lib/matplotlib/pyplot.py
Python
6
{ "docstring": "\n Set the default colormap, and applies it to the current image if any.\n\n Parameters\n ----------\n cmap : `~matplotlib.colors.Colormap` or str\n A colormap instance or the name of a registered colormap.\n\n See Also\n --------\n colormaps\n matplotlib.cm.register_cmap\n matplotlib.cm.get_cmap\n ", "language": "en", "n_whitespaces": 74, "n_words": 36, "vocab_size": 33 }
https://github.com/matplotlib/matplotlib.git
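A minimal usage sketch for the pyplot-level helper in the record above, assuming a standard Matplotlib install; the data is arbitrary.

import numpy as np
import matplotlib.pyplot as plt

plt.imshow(np.random.rand(8, 8))
plt.set_cmap("viridis")  # switches the default colormap and updates the current image
plt.colorbar()
plt.show()
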
1
test_ddp_sharded_strategy_fit_ckpt_path
def test_ddp_sharded_strategy_fit_ckpt_path(tmpdir):
    model = BoringModel()
    trainer = Trainer(strategy="ddp_sharded_spawn", num_processes=2, fast_dev_run=True)

    trainer.fit(model)

    checkpoint_path = os.path.join(tmpdir, "model.pt")
    trainer.save_checkpoint(checkpoint_path)

    model = BoringModel()
    trainer = Trainer(strategy="ddp_sharded_spawn", num_processes=2, fast_dev_run=True)

    trainer.fit(model, ckpt_path=checkpoint_path)


@pytest.mark.skip(reason="Not a critical test, skip till drone CI performance improves.")  # todo
@pytest.mark.skip(reason="Currently unsupported restarting training on different number of devices.")
@RunIf(min_gpus=2, skip_windows=True, fairscale=True)
650c710efacd633fa283955145342bb64063c883
@pytest.mark.skip(reason="Not a critical test, skip till drone CI performance improves.") # todo @pytest.mark.skip(reason="Currently unsupported restarting training on different number of devices.") @RunIf(min_gpus=2, skip_windows=True, fairscale=True)
10
test_sharded_strategy.py
197
Rename training plugin test files & names to strategy (#11303)
69,606
1
75
82
40
241,581
50
lightning
24
tests/strategies/test_sharded_strategy.py
Python
9
{ "docstring": "Test to ensure that resuming from checkpoint works.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/Lightning-AI/lightning.git
4
get_provider
def get_provider(moduleOrReq):
    if isinstance(moduleOrReq, Requirement):
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    try:
        module = sys.modules[moduleOrReq]
    except KeyError:
        __import__(moduleOrReq)
        module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    return _find_adapter(_provider_factories, loader)(module)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
13
__init__.py
124
upd; format
13,146
0
73
77
22
63,100
27
transferlearning
17
.venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py
Python
10
{ "docstring": "Return an IResourceProvider for the named module or requirement", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/jindongwang/transferlearning.git
2
get_template_env
def get_template_env(self) -> jinja2.Environment:
    dag = self.get_dag()
    if dag:
        return dag.get_template_env()
    return SandboxedEnvironment(cache_size=0)
ff3bbc3db24f9f3f4f88033d48859fb08fc3237b
9
base.py
57
Implement enough interface for MappedOperator to be baggable (#20945)
8,186
0
52
33
12
44,167
13
airflow
8
airflow/models/base.py
Python
6
{ "docstring": "Fetch a Jinja template environment from the DAG or instantiate empty environment if no DAG.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 14 }
https://github.com/apache/airflow.git
2
load
def load(self):
    try:
        return signing.loads(
            self.session_key,
            serializer=self.serializer,
            # This doesn't handle non-default expiry dates, see #19201
            max_age=self.get_session_cookie_age(),
            salt="django.contrib.sessions.backends.signed_cookies",
        )
    except Exception:
        # BadSignature, ValueError, or unpickling exceptions. If any of
        # these happen, reset the session.
        self.create()
    return {}
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
signed_cookies.py
81
Refs #33476 -- Reformatted code with Black.
50,693
0
197
47
36
204,312
39
django
11
django/contrib/sessions/backends/signed_cookies.py
Python
11
{ "docstring": "\n Load the data from the key itself instead of fetching from some\n external data store. Opposite of _get_session_key(), raise BadSignature\n if signature fails.\n ", "language": "en", "n_whitespaces": 52, "n_words": 23, "vocab_size": 19 }
https://github.com/django/django.git
5
mlsd
def mlsd(self, path="", facts=[]):
    if facts:
        self.sendcmd("OPTS MLST " + ";".join(facts) + ";")
    if path:
        cmd = "MLSD %s" % path
    else:
        cmd = "MLSD"
    lines = []
    self.retrlines(cmd, lines.append)
    for line in lines:
        facts_found, _, name = line.rstrip(CRLF).partition(' ')
        entry = {}
        for fact in facts_found[:-1].split(";"):
            key, _, value = fact.partition("=")
            entry[key.lower()] = value
        yield (name, entry)
8198943edd73a363c266633e1aa5b2a9e9c9f526
14
ftplib.py
222
add python 3.10.4 for windows
54,776
0
214
129
45
217,427
58
XX-Net
23
python3.10.4/Lib/ftplib.py
Python
16
{ "docstring": "List a directory in a standardized format by using MLSD\n command (RFC-3659). If path is omitted the current directory\n is assumed. \"facts\" is a list of strings representing the type\n of information desired (e.g. [\"type\", \"size\", \"perm\"]).\n\n Return a generator object yielding a tuple of two elements\n for every file found in path.\n First element is the file name, the second one is a dictionary\n including a variable number of \"facts\" depending on the server\n and whether \"facts\" argument has been provided.\n ", "language": "en", "n_whitespaces": 145, "n_words": 82, "vocab_size": 60 }
https://github.com/XX-net/XX-Net.git
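A hedged usage sketch of `FTP.mlsd` from the standard library record above; the host name is a placeholder and the call naturally requires a reachable server that supports the MLSD command.

from ftplib import FTP

with FTP("ftp.example.com") as ftp:  # placeholder host
    ftp.login()  # anonymous login
    for name, facts in ftp.mlsd(path="", facts=["type", "size"]):
        print(name, facts.get("type"), facts.get("size"))
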
1
callbacks
def callbacks(self, callbacks_class) -> "TrainerConfig":
    self.callbacks_class = callbacks_class
    return self
2eaa54bd763ae0e63158ae0d939633c804394b78
7
trainer_config.py
31
[RLlib] POC: Config objects instead of dicts (PPO only). (#23491)
34,012
0
31
17
10
147,576
10
ray
3
rllib/agents/trainer_config.py
Python
14
{ "docstring": "Sets the callbacks configuration.\n\n Args:\n callbacks_class: Callbacks class, whose methods will be run during\n various phases of training and environment sample collection.\n See the `DefaultCallbacks` class and\n `examples/custom_metrics_and_callbacks.py` for more usage information.\n\n Returns:\n This updated TrainerConfig object.\n ", "language": "en", "n_whitespaces": 125, "n_words": 37, "vocab_size": 35 }
https://github.com/ray-project/ray.git
2
print_help
def print_help(self):
    source_txt = CRYPTO_SOURCES.get(self.source, "?") if self.source != "" else ""
    help_text = f
    console.print(text=help_text, menu="Crypto - Due Diligence")
a6f7e111e68346aeab315985b3704c2710693b38
10
dd_controller.py
86
Bounty Hunter mood: 11 bugs fixed (#1853) * fix #1850 * fix #1831 * add extra check to Reddit API keys * ignore warning message to update praw api * improve OpenBB links * fix quick performance only on stocks class because I'm James bitch * fix quick performance only on stocks class because I'm James bitch * fix #1829 * fix #1821 * add messari to keys - fix #1819 * example of multiple oclumns to check on options/chains * minor improvement in xlabel re. #1814 * remove repeated command * fix #1698 * fix line too long * fix #1814 fr now * fix tests
84,832
0
48
42
18
284,582
20
OpenBBTerminal
12
openbb_terminal/cryptocurrency/due_diligence/dd_controller.py
Python
56
{ "docstring": "Print help[cmds]\n load load a specific cryptocurrency for analysis\n\n[param]Coin: [/param]{self.coin}\n[param]Source: [/param]{source_txt}\n\n[src]CoinGecko[/src]\n info basic information about loaded coin\n market market stats about loaded coin\n ath all time high related stats for loaded coin\n atl all time low related stats for loaded coin\n web found websites for loaded coin e.g forum, homepage\n social social portals urls for loaded coin, e.g reddit, twitter\n score different kind of scores for loaded coin, e.g developer score, sentiment score\n dev github, bitbucket coin development statistics\n bc links to blockchain explorers for loaded coin\n[src]Glassnode[/src]\n active active addresses\n nonzero addresses with non-zero balances\n change 30d change of supply held on exchange wallets\n eb total balance held on exchanges (in percentage and units)\n[src]Coinglass[/src]\n oi open interest per exchange\n[src]CoinPaprika[/src]\n basic basic information about loaded coin\n ps price and supply related metrics for loaded coin\n mkt all markets for loaded coin\n ex all exchanges where loaded coin is listed\n twitter tweets for loaded coin\n events events related to loaded coin\n[src]Binance[/src]\n binbook order book\n balance coin balance\n[src]Coinbase[/src]\n cbbook order book\n trades last trades\n stats coin stats\n[src]Messari[/src]\n mcapdom market cap dominance\n mt messari timeseries e.g. twitter followers, circ supply, etc\n rm roadmap\n tk tokenomics e.g. circulating/max/total supply, emission type, etc\n pi project information e.g. technology details, public repos, audits, vulns\n team contributors (individuals and organizations)\n inv investors (individuals and organizations)\n gov governance details\n fr fundraising details e.g. treasury accounts, sales rounds, allocation\n links links e.g. whitepaper, github, twitter, youtube, reddit, telegram\n[src]Santiment[/src]\n gh github activity over time\n[src]CryptoPanic[/src]\n news loaded coin's most recent news[/cmds]\n", "language": "en", "n_whitespaces": 814, "n_words": 260, "vocab_size": 163 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
__call__
def __call__(self, hidden_states):
    # layer norm should always be calculated in float32
    variance = jnp.power(hidden_states.astype("f4"), 2).mean(axis=-1, keepdims=True)
    hidden_states = hidden_states / jnp.sqrt(variance + self.eps)

    return self.weight * hidden_states


# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5DenseActDense with T5->LongT5
a72f1c9f5b907f96cbb7de3bbb02a1d431d34071
13
modeling_flax_longt5.py
91
Add `LongT5` model (#16792) * Initial commit * Make some fixes * Make PT model full forward pass * Drop TF & Flax implementation, fix copies etc * Add Flax model and update some corresponding stuff * Drop some TF things * Update config and flax local attn * Add encoder_attention_type to config * . * Update docs * Do some cleansing * Fix some issues -> make style; add some docs * Fix position_bias + mask addition + Update tests * Fix repo consistency * Fix model consistency by removing flax operation over attn_mask * [WIP] Add PT TGlobal LongT5 * . * [WIP] Add flax tglobal model * [WIP] Update flax model to use the right attention type in the encoder * Fix flax tglobal model forward pass * Make the use of global_relative_attention_bias * Add test suites for TGlobal model * Fix minor bugs, clean code * Fix pt-flax equivalence though not convinced with correctness * Fix LocalAttn implementation to match the original impl. + update READMEs * Few updates * Update: [Flax] improve large model init and loading #16148 * Add ckpt conversion script accoring to #16853 + handle torch device placement * Minor updates to conversion script. * Typo: AutoModelForSeq2SeqLM -> FlaxAutoModelForSeq2SeqLM * gpu support + dtype fix * Apply some suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * * Remove (de)parallelize stuff * Edit shape comments * Update README.md * make fix-copies * Remove caching logic for local & tglobal attention * Apply another batch of suggestions from code review * Add missing checkpoints * Format converting scripts * Drop (de)parallelize links from longT5 mdx * Fix converting script + revert config file change * Revert "Remove caching logic for local & tglobal attention" This reverts commit 2a619828f6ddc3e65bd9bb1725a12b77fa883a46. * Stash caching logic in Flax model * Make side relative bias used always * Drop caching logic in PT model * Return side bias as it was * Drop all remaining model parallel logic * Remove clamp statements * Move test files to the proper place * Update docs with new version of hf-doc-builder * Fix test imports * Make some minor improvements * Add missing checkpoints to docs * Make TGlobal model compatible with torch.onnx.export * Replace some np.ndarray with jnp.ndarray * Fix TGlobal for ONNX conversion + update docs * fix _make_global_fixed_block_ids and masked neg value * update flax model * style and quality * fix imports * remove load_tf_weights_in_longt5 from init and fix copies * add slow test for TGlobal model * typo fix * Drop obsolete is_parallelizable and one warning * Update __init__ files to fix repo-consistency * fix pipeline test * Fix some device placements * [wip]: Update tests -- need to generate summaries to update expected_summary * Fix quality * Update LongT5 model card * Update (slow) summarization tests * make style * rename checkpoitns * finish * fix flax tests Co-authored-by: phungvanduy <pvduy23@gmail.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> Co-authored-by: patil-suraj <surajp815@gmail.com>
5,713
0
68
55
30
31,263
34
transformers
13
src/transformers/models/longt5/modeling_flax_longt5.py
Python
4
{ "docstring": "\n Construct a layernorm module in the LongT5 style; No bias and no subtraction of mean.\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 15 }
https://github.com/huggingface/transformers.git
11
partitionwise_graph
def partitionwise_graph(func, layer_name, *args, **kwargs):
    pairs = []
    numblocks = {}
    for arg in args:
        if isinstance(arg, _Frame):
            pairs.extend([arg._name, "i"])
            numblocks[arg._name] = (arg.npartitions,)
        elif isinstance(arg, Scalar):
            pairs.extend([arg._name, "i"])
            numblocks[arg._name] = (1,)
        elif isinstance(arg, Array):
            if arg.ndim == 1:
                pairs.extend([arg.name, "i"])
            elif arg.ndim == 0:
                pairs.extend([arg.name, ""])
            elif arg.ndim == 2:
                pairs.extend([arg.name, "ij"])
            else:
                raise ValueError("Can't add multi-dimensional array to dataframes")
            numblocks[arg._name] = arg.numblocks
        elif isinstance(arg, BlockwiseDep):
            if len(arg.numblocks) == 1:
                pairs.extend([arg, "i"])
            elif len(arg.numblocks) == 2:
                pairs.extend([arg, "ij"])
            else:
                raise ValueError(
                    f"BlockwiseDep arg {arg!r} has {len(arg.numblocks)} dimensions; only 1 or 2 are supported."
                )
        else:
            pairs.extend([arg, None])
    return blockwise(
        func, layer_name, "i", *pairs, numblocks=numblocks, concatenate=True, **kwargs
    )
2a9d34aff0a38be5fc8bfcdec38e5c4a7bafcf0e
20
core.py
437
Move DataFrame ACA aggregations to HLG (#8468)
36,437
0
449
264
67
155,618
107
dask
22
dask/dataframe/core.py
Python
34
{ "docstring": "\n Apply a function partition-wise across arguments to create layer of a graph\n\n This applies a function, ``func``, in an embarrassingly parallel fashion\n across partitions/chunks in the provided arguments. It handles Dataframes,\n Arrays, and scalars smoothly, and relies on the ``blockwise`` machinery\n to provide a nicely symbolic graph.\n\n It is most commonly used in other graph-building functions to create the\n appropriate layer of the resulting dataframe.\n\n Parameters\n ----------\n func: callable\n layer_name: str\n Descriptive name for the operation. Used as the output name\n in the resulting ``Blockwise`` graph layer.\n *args:\n **kwargs:\n\n Returns\n -------\n out: Blockwise graph\n\n Examples\n --------\n >>> subgraph = partitionwise_graph(function, x, y, z=123) # doctest: +SKIP\n >>> layer = partitionwise_graph(function, df, x, z=123) # doctest: +SKIP\n >>> graph = HighLevelGraph.from_collections(name, layer, dependencies=[df, x]) # doctest: +SKIP\n >>> result = new_dd_object(graph, name, metadata, df.divisions) # doctest: +SKIP\n\n See Also\n --------\n map_partitions\n ", "language": "en", "n_whitespaces": 238, "n_words": 140, "vocab_size": 95 }
https://github.com/dask/dask.git
4
cancel
def cancel(self, msg=None):
    self._log_traceback = False
    if self.done():
        return False
    if self._fut_waiter is not None:
        if self._fut_waiter.cancel(msg=msg):
            # Leave self._fut_waiter; it may be a Task that
            # catches and ignores the cancellation so we may have
            # to cancel it again later.
            return True
    # It must be the case that self.__step is already scheduled.
    self._must_cancel = True
    self._cancel_message = msg
    return True
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
tasks.py
98
add python 3.10.4 for windows
56,134
0
201
58
45
220,825
63
XX-Net
8
python3.10.4/Lib/asyncio/tasks.py
Python
10
{ "docstring": "Request that this task cancel itself.\n\n This arranges for a CancelledError to be thrown into the\n wrapped coroutine on the next cycle through the event loop.\n The coroutine then has a chance to clean up or even deny\n the request using try/except/finally.\n\n Unlike Future.cancel, this does not guarantee that the\n task will be cancelled: the exception might be caught and\n acted upon, delaying cancellation of the task or preventing\n cancellation completely. The task may also return a value or\n raise a different exception.\n\n Immediately after this method is called, Task.cancelled() will\n not return True (unless the task was already cancelled). A\n task will be marked as cancelled when the wrapped coroutine\n terminates with a CancelledError exception (even if cancel()\n was not called).\n ", "language": "en", "n_whitespaces": 229, "n_words": 122, "vocab_size": 83 }
https://github.com/XX-net/XX-Net.git
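An illustrative sketch of the cooperative cancellation behaviour described in the docstring of the record above, using only the public asyncio API; the `msg=` argument assumes Python 3.9 or newer.

import asyncio


async def worker():
    try:
        await asyncio.sleep(3600)
    except asyncio.CancelledError:
        print("worker observed cancellation")
        raise


async def main():
    task = asyncio.create_task(worker())
    await asyncio.sleep(0)  # let the task start
    task.cancel(msg="shutting down")
    try:
        await task
    except asyncio.CancelledError:
        print("task is cancelled:", task.cancelled())


asyncio.run(main())
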
2
_dictionary
def _dictionary(self):
    # type: () -> Dict[str, Any]
    # NOTE: Dictionaries are not populated if not loaded. So, conditionals
    #       are not needed here.
    retval = {}

    for variant in OVERRIDE_ORDER:
        retval.update(self._config[variant])

    return retval
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
11
configuration.py
50
upd; format
12,237
0
100
28
28
60,671
34
transferlearning
7
.venv/lib/python3.8/site-packages/pip/_internal/configuration.py
Python
5
{ "docstring": "A dictionary representing the loaded configuration.\n ", "language": "en", "n_whitespaces": 13, "n_words": 6, "vocab_size": 6 }
https://github.com/jindongwang/transferlearning.git
3
_transpose_for_tf_conv
def _transpose_for_tf_conv(lhs, rhs, dimension_numbers):
    # TODO(marcvanzee): Add tests for this ops for shape polymorphism.
    lhs_perm, rhs_perm, _ = dimension_numbers

    # TODO(marcvanzee): Consider merging tranposes if we want to optimize.
    # For `lhs_perm` / `output_perm`, perm (0, 1, 2, 3) corresponds to "NCHW".
    lhs = tf.transpose(lhs, lhs_perm)  # lhs --> "NCHW"
    if len(lhs_perm) == 3:
        # For 1D convolution, we add a trivial "W" dimension, so that 2D Convolution
        # logic can be applied downstream.
        lhs = lhs[:, :, :, np.newaxis]
    # However, the TF ops only support "NHWC" on CPU, so we transpose again.
    lhs = tf.transpose(lhs, (0, 2, 3, 1))  # "NCHW" --> "NHWC"

    # For `rhs_perm`, perm (0, 1, 2, 3) corresponds to "OIHW".
    rhs = tf.transpose(rhs, rhs_perm)  # rhs --> "OIHW"
    # Handle conv1d case.
    if len(rhs_perm) == 3:
        rhs = rhs[:, :, :, np.newaxis]
    # For the tf ops, rhs is expected to be "OIHW".
    rhs = tf.transpose(rhs, (2, 3, 1, 0))  # "OIHW" --> "HWIO"
    return lhs, rhs
4e224bcfb99c3bd9b6a32b8ad7836d12517e788f
10
impl_no_xla.py
192
[jax2tf] Add support for common audio convolutions (1D variants, dilated depthwise, transpose with SAME padding). PiperOrigin-RevId: 458266485
27,024
0
195
121
96
121,058
163
jax
12
jax/experimental/jax2tf/impl_no_xla.py
Python
11
{ "docstring": "Tranposes lhs and rhs to respectively NHWC and HWIO so they can be passed to TF functions.", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 15 }
https://github.com/google/jax.git
3
test_pprint_heap_allocated_type
def test_pprint_heap_allocated_type():
    module_name = "xxlimited" if sys.version_info < (3, 10) else "xxlimited_35"
    expected_output = (
        "xxlimited.Null" if sys.version_info < (3, 11) else "xxlimited_35.Null"
    )
    xxlimited = pytest.importorskip(module_name)
    output = pretty.pretty(xxlimited.Null)
    assert output == expected_output
d858213d4088237e1481038865bc52ccdd074053
10
test_pretty.py
100
xxlimited_35 module now has the same name in repr in Py 3.11 See https://github.com/python/cpython/commit/a87c9b538fbfc42883417c4d5e69f1a5922690e3
52,497
0
62
59
24
208,740
34
ipython
11
IPython/lib/tests/test_pretty.py
Python
8
{ "docstring": "\n Test that pprint works for heap allocated types.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
https://github.com/ipython/ipython.git
1
test_delete_missing_version
def test_delete_missing_version(self) -> None:
    e = self.get_failure(
        self.handler.delete_version(self.local_user, "1"), SynapseError
    )
    res = e.value.code
    self.assertEqual(res, 404)
652d1669c5a103b1c20478770c4aaf18849c09a3
11
test_e2e_room_keys.py
72
Add missing type hints to tests.handlers. (#14680) And do not allow untyped defs in tests.handlers.
73,359
0
62
44
15
250,281
16
synapse
12
tests/handlers/test_e2e_room_keys.py
Python
7
{ "docstring": "Check that we get a 404 on deleting nonexistent versions", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/matrix-org/synapse.git
10
name
def name(self, pretty=False):
    # type: (bool) -> str
    name = (
        self.os_release_attr("name")
        or self.lsb_release_attr("distributor_id")
        or self.distro_release_attr("name")
        or self.uname_attr("name")
    )
    if pretty:
        name = self.os_release_attr("pretty_name") or self.lsb_release_attr(
            "description"
        )
        if not name:
            name = self.distro_release_attr("name") or self.uname_attr("name")
            version = self.version(pretty=True)
            if version:
                name = name + " " + version
    return name or ""
f3166e673fe8d40277b804d35d77dcdb760fc3b3
15
distro.py
186
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,230
0
251
102
31
20,087
53
pipenv
8
pipenv/patched/notpip/_vendor/distro.py
Python
17
{ "docstring": "\n Return the name of the OS distribution, as a string.\n\n For details, see :func:`distro.name`.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
https://github.com/pypa/pipenv.git
1
test_extra_serialized_field_and_multiple_operator_links
def test_extra_serialized_field_and_multiple_operator_links(self, dag_maker):
    test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)
    with dag_maker(dag_id='simple_dag', start_date=test_date) as dag:
        CustomOperator(task_id='simple_task', bash_command=["echo", "true"])

    serialized_dag = SerializedDAG.to_dict(dag)
    assert "bash_command" in serialized_dag["dag"]["tasks"][0]

    dag = SerializedDAG.from_dict(serialized_dag)
    simple_task = dag.task_dict["simple_task"]
    assert getattr(simple_task, "bash_command") == ["echo", "true"]

    #########################################################
    # Verify Operator Links work with Serialized Operator
    #########################################################
    # Check Serialized version of operator link only contains the inbuilt Op Link
    assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
        {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
        {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
    ]

    # Test all the extra_links are set
    assert set(simple_task.extra_links) == {
        'BigQuery Console #1',
        'BigQuery Console #2',
        'airflow',
        'github',
        'google',
    }

    dag_maker.create_dagrun(execution_date=test_date)
    XCom.set(
        key='search_query',
        value=["dummy_value_1", "dummy_value_2"],
        task_id=simple_task.task_id,
        dag_id=simple_task.dag_id,
        execution_date=test_date,
    )

    # Test Deserialized inbuilt link #1
    custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
    assert 'https://console.cloud.google.com/bigquery?j=dummy_value_1' == custom_inbuilt_link

    # Test Deserialized inbuilt link #2
    custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
    assert 'https://console.cloud.google.com/bigquery?j=dummy_value_2' == custom_inbuilt_link

    # Test Deserialized link registered via Airflow Plugin
    google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
    assert "https://www.google.com" == google_link_from_plugin
0ebd6428e6b484790bfbbe1b8687ef4e6cae10e9
12
test_dag_serialization.py
430
Switch XCom implementation to use run_id (#20975)
8,435
0
494
246
98
44,997
148
airflow
33
tests/serialization/test_dag_serialization.py
Python
34
{ "docstring": "\n Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.\n\n This tests also depends on GoogleLink() registered as a plugin\n in tests/plugins/test_plugin.py\n\n The function tests that if extra operator links are registered in plugin\n in ``operator_extra_links`` and the same is also defined in\n the Operator in ``BaseOperator.operator_extra_links``, it has the correct\n extra link.\n ", "language": "en", "n_whitespaces": 113, "n_words": 56, "vocab_size": 40 }
https://github.com/apache/airflow.git
5
get_mac_addr
def get_mac_addr(self):
    if (
        self.bulb.host_firmware_version
        and AwesomeVersion(self.bulb.host_firmware_version) >= FIX_MAC_FW
    ):
        octets = [int(octet, 16) for octet in self.mac_addr.split(":")]
        octets[5] = (octets[5] + 1) % 256
        return ":".join(f"{octet:02x}" for octet in octets)
    return self.mac_addr
a0974e0c7297537149985f93544dd6f8ed8cfded
13
light.py
131
Refactor LIFX discovery to prevent duplicate discovery response handling (#72213) * Partially revert #70458 and allow duplicate LIFX discoveries Signed-off-by: Avi Miller <me@dje.li> * Only process one discovery at a time * Revert all LIFX duplicate/inflight discovery checks Also remember LIFX Switches and do as little processing for them as possible. Signed-off-by: Avi Miller <me@dje.li> * Bump aiolifx version to support the latest LIFX devices LIFX added 22 new product definitions to their public product list at the end of January and those new products are defined in aiolifx v0.8.1, so bump the dependency version. Also switched to testing for relays instead of maintaining a seperate list of switch product IDs. Fixes #72894. Signed-off-by: Avi Miller <me@dje.li> * Refactor LIFX discovery to better handle duplicate responses Signed-off-by: Avi Miller <me@dje.li> * Update clear_inflight_discovery with review suggestion Signed-off-by: Avi Miller <me@dje.li> * Move the existing entity check to before the asyncio lock Signed-off-by: Avi Miller <me@dje.li> * Bail out of discovery early and if an entity was created Also ensure that the entity always has a unique ID even if the bulb was not successfully discovered. Signed-off-by: Avi Miller <me@dje.li> Co-authored-by: J. Nick Koston <nick@koston.org>
112,213
0
116
78
28
313,595
33
core
12
homeassistant/components/lifx/light.py
Python
9
{ "docstring": "Increment the last byte of the mac address by one for FW>3.70.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/home-assistant/core.git
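A standalone sketch of the octet arithmetic performed in the record above (incrementing the last byte of a MAC address, modulo 256); the sample address is made up.

mac_addr = "d0:73:d5:01:02:ff"  # hypothetical bulb MAC
octets = [int(octet, 16) for octet in mac_addr.split(":")]
octets[5] = (octets[5] + 1) % 256
print(":".join(f"{octet:02x}" for octet in octets))  # d0:73:d5:01:02:00
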
2
metrics_names
def metrics_names(self):
    # This property includes all output names including `loss` and per-output
    # losses for backward compatibility.
    return [m.name for m in self.metrics]
84afc5193d38057e2e2badf9c889ea87d80d8fbf
8
training.py
32
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,791
0
52
18
22
271,550
24
keras
5
keras/engine/training.py
Python
2
{ "docstring": "Returns the model's display labels for all outputs.\n\n Note: `metrics_names` are available only after a `keras.Model` has been\n trained/evaluated on actual data.\n\n Examples:\n\n >>> inputs = tf.keras.layers.Input(shape=(3,))\n >>> outputs = tf.keras.layers.Dense(2)(inputs)\n >>> model = tf.keras.models.Model(inputs=inputs, outputs=outputs)\n >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\"])\n >>> model.metrics_names\n []\n\n >>> x = np.random.random((2, 3))\n >>> y = np.random.randint(0, 2, (2, 2))\n >>> model.fit(x, y)\n >>> model.metrics_names\n ['loss', 'mae']\n\n >>> inputs = tf.keras.layers.Input(shape=(3,))\n >>> d = tf.keras.layers.Dense(2, name='out')\n >>> output_1 = d(inputs)\n >>> output_2 = d(inputs)\n >>> model = tf.keras.models.Model(\n ... inputs=inputs, outputs=[output_1, output_2])\n >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\", \"acc\"])\n >>> model.fit(x, (y, y))\n >>> model.metrics_names\n ['loss', 'out_loss', 'out_1_loss', 'out_mae', 'out_acc', 'out_1_mae',\n 'out_1_acc']\n\n ", "language": "en", "n_whitespaces": 290, "n_words": 105, "vocab_size": 70 }
https://github.com/keras-team/keras.git
5
from_collections
def from_collections(cls, name, layer, dependencies=()):
    if len(dependencies) == 1:
        return cls._from_collection(name, layer, dependencies[0])
    layers = {name: layer}
    deps = {name: set()}
    for collection in toolz.unique(dependencies, key=id):
        if is_dask_collection(collection):
            graph = collection.__dask_graph__()
            if isinstance(graph, HighLevelGraph):
                layers.update(graph.layers)
                deps.update(graph.dependencies)
                deps[name] |= set(collection.__dask_layers__())
            else:
                key = _get_some_layer_name(collection)
                layers[key] = graph
                deps[name].add(key)
                deps[key] = set()
        else:
            raise TypeError(type(collection))

    return cls(layers, deps)
dfdc4bbab43678927e30866f06df509483ac5d24
16
highlevelgraph.py
260
Collections with HLG must always implement __dask_layers__ (#8548) Without ``__dask_layers__``, ``HighLevelGraph.from_collections`` will produce a broken layer dependency graph and things will fall apart down the line. Given this, it seems likely that no users should have the use case in production, so it should be safe not to have a deprecation cycle here.
36,421
0
324
165
44
155,542
56
dask
26
dask/highlevelgraph.py
Python
53
{ "docstring": "Construct a HighLevelGraph from a new layer and a set of collections\n\n This constructs a HighLevelGraph in the common case where we have a single\n new layer and a set of old collections on which we want to depend.\n\n This pulls out the ``__dask_layers__()`` method of the collections if\n they exist, and adds them to the dependencies for this new layer. It\n also merges all of the layers from all of the dependent collections\n together into the new layers for this graph.\n\n Parameters\n ----------\n name : str\n The name of the new layer\n layer : Mapping\n The graph layer itself\n dependencies : List of Dask collections\n A list of other dask collections (like arrays or dataframes) that\n have graphs themselves\n\n Examples\n --------\n\n In typical usage we make a new task layer, and then pass that layer\n along with all dependent collections to this method.\n\n >>> def add(self, other):\n ... name = 'add-' + tokenize(self, other)\n ... layer = {(name, i): (add, input_key, other)\n ... for i, input_key in enumerate(self.__dask_keys__())}\n ... graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])\n ... return new_collection(name, graph)\n ", "language": "en", "n_whitespaces": 407, "n_words": 179, "vocab_size": 105 }
https://github.com/dask/dask.git
1
test_hf_classification_bin
def test_hf_classification_bin(self, mock_handler): # create predictor create_sql = model_name = 'spam_classifier' predict_sql = self.hf_test_run(mock_handler, model_name, create_sql, predict_sql) # one line prediction predict_sql = # use predictor ret = self.command_executor.execute_command(parse_sql(predict_sql, dialect='mindsdb')) assert ret.error_code is None
cc6313a0f791ba42782082b1161b6a62578e45f4
12
test_ml_handlers.py
98
fixed one line prediction for new ml handler api
25,865
0
106
54
26
116,887
34
mindsdb
13
tests/unit/test_ml_handlers.py
Python
23
{ "docstring": "\n CREATE PREDICTOR huggingface.spam_classifier\n predict PRED\n USING\n task='text-classification',\n model_name= \"mrm8488/bert-tiny-finetuned-sms-spam-detection\",\n input_column = 'text_spammy',\n labels=['ham','spam']\n \n SELECT h.*\n FROM pg.df as t \n JOIN huggingface.spam_classifier as h\n \n SELECT * from huggingface.spam_classifier\n where text_spammy= 'It is the best time to launch the Robot to get more money. https:\\\\/\\\\/Gof.bode-roesch.de\\\\/Gof'\n ", "language": "en", "n_whitespaces": 216, "n_words": 43, "vocab_size": 37 }
https://github.com/mindsdb/mindsdb.git
5
update_denormalized_fields
def update_denormalized_fields(sender, instance, created, raw, **kwargs):
    # Skip for new objects or those being populated from raw data
    if created or raw:
        return

    # Look up any denormalized fields referencing this model from the application registry
    for model, field_name, mappings in registry['denormalized_fields'].get(sender, []):
        logger.debug(f'Updating denormalized values for {model}.{field_name}')

        filter_params = {
            field_name: instance.pk,
        }
        update_params = {
            # Map the denormalized field names to the instance's values
            denorm: getattr(instance, origin) for denorm, origin in mappings.items()
        }

        # TODO: Improve efficiency here by placing conditions on the query?
        # Update all the denormalized fields with the triggering object's new values
        count = model.objects.filter(**filter_params).update(**update_params)
        logger.debug(f'Updated {count} rows')
e96620260a6c1b5cf8cff2112d40d061984a7b2c
13
denormalized.py
180
Closes #9903: Implement a mechanism for automatically updating denormalized fields
78,111
0
223
104
79
265,476
105
netbox
24
netbox/netbox/denormalized.py
Python
13
{ "docstring": "\n Check if the sender has denormalized fields registered, and update them as necessary.\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 13 }
https://github.com/netbox-community/netbox.git
7
aimport
def aimport(self, parameter_s="", stream=None):
    modname = parameter_s
    if not modname:
        to_reload = sorted(self._reloader.modules.keys())
        to_skip = sorted(self._reloader.skip_modules.keys())
        if stream is None:
            stream = sys.stdout
        if self._reloader.check_all:
            stream.write("Modules to reload:\nall-except-skipped\n")
        else:
            stream.write("Modules to reload:\n%s\n" % " ".join(to_reload))
        stream.write("\nModules to skip:\n%s\n" % " ".join(to_skip))
    else:
        for _module in [_.strip() for _ in modname.split(",")]:
            if _module.startswith("-"):
                _module = _module[1:].strip()
                self._reloader.mark_module_skipped(_module)
            else:
                top_module, top_name = self._reloader.aimport_module(_module)

                # Inject module to user namespace
                self.shell.push({top_name: top_module})
8a66e854a87b5147d811bd3bc92c5c2a382633e1
17
autoreload.py
308
Improve parsing for `%aimport`
52,549
0
344
175
49
208,820
69
ipython
28
IPython/extensions/autoreload.py
Python
20
{ "docstring": "%aimport => Import modules for automatic reloading.\n\n %aimport\n List modules to automatically import and not to import.\n\n %aimport foo\n Import module 'foo' and mark it to be autoreloaded for %autoreload 1\n\n %aimport foo, bar\n Import modules 'foo', 'bar' and mark them to be autoreloaded for %autoreload 1\n\n %aimport -foo, bar\n Mark module 'foo' to not be autoreloaded for %autoreload 1, 2, or 3, and 'bar'\n to be autoreloaded for 1.\n ", "language": "en", "n_whitespaces": 140, "n_words": 70, "vocab_size": 35 }
https://github.com/ipython/ipython.git
5
format_datetime
def format_datetime(dt, usegmt=False):
    now = dt.timetuple()
    if usegmt:
        if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc:
            raise ValueError("usegmt option requires a UTC datetime")
        zone = 'GMT'
    elif dt.tzinfo is None:
        zone = '-0000'
    else:
        zone = dt.strftime("%z")
    return _format_timetuple_and_zone(now, zone)
8198943edd73a363c266633e1aa5b2a9e9c9f526
12
utils.py
125
add python 3.10.4 for windows
57,131
0
97
72
31
223,881
40
XX-Net
13
python3.10.4/Lib/email/utils.py
Python
11
{ "docstring": "Turn a datetime into a date string as specified in RFC 2822.\n\n If usegmt is True, dt must be an aware datetime with an offset of zero. In\n this case 'GMT' will be rendered instead of the normal +0000 required by\n RFC2822. This is to support HTTP headers involving date stamps.\n ", "language": "en", "n_whitespaces": 65, "n_words": 51, "vocab_size": 44 }
https://github.com/XX-net/XX-Net.git
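A quick usage sketch of the standard-library function in the record above (`email.utils.format_datetime`); the timestamp is arbitrary.

import datetime
from email.utils import format_datetime

dt = datetime.datetime(2022, 3, 1, 12, 30, tzinfo=datetime.timezone.utc)
print(format_datetime(dt))               # 'Tue, 01 Mar 2022 12:30:00 +0000'
print(format_datetime(dt, usegmt=True))  # 'Tue, 01 Mar 2022 12:30:00 GMT'
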
3
get_controller_target_connections
def get_controller_target_connections(self) -> t.List[SshConnection]:
    return list(itertools.chain.from_iterable(
        [target.get_controller_target_connections()
         for target in self.target_profiles if isinstance(target, SshTargetHostProfile)]))
3eb0485dd92c88cc92152d3656d94492db44b183
13
provisioning.py
70
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
79,309
0
77
44
13
268,035
13
ansible
13
test/lib/ansible_test/_internal/provisioning.py
Python
4
{ "docstring": "Return SSH connection(s) for accessing all target hosts from the controller.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/ansible/ansible.git
8
getAvailableActions
def getAvailableActions(self):
    action = list()
    for i in range(self.size):
        for j in range(self.size):
            if self.state[i][j] == 0:
                if i > 0:
                    action.append(2)
                if j > 0:
                    action.append(0)
                if i < self.size - 1:
                    action.append(3)
                if j < self.size - 1:
                    action.append(1)
                return action
    return action
f0af0c43340763724f139fa68aa1e5a9ffe458b4
15
eight_puzzle.py
161
refactor: clean code Signed-off-by: slowy07 <slowy.arfy@gmail.com>
4,328
0
286
100
24
22,423
45
Python
10
Eight_Puzzle_Solver/eight_puzzle.py
Python
15
{ "docstring": "\n Parameters: Current State\n Returns: Available Actions for Current State\n 0 - Left 1 - Right 2 - Top 3 - Bottom\n Restrictions: state is self.size x self.size Array\n ", "language": "en", "n_whitespaces": 73, "n_words": 28, "vocab_size": 22 }
https://github.com/geekcomputers/Python.git
4
check_picklelib
def check_picklelib(cls, value):
    try:
        pickler = from_qualified_name(value)
    except (ImportError, AttributeError) as exc:
        raise ValueError(
            f"Failed to import requested pickle library: {value!r}."
        ) from exc

    if not hasattr(pickler, "dumps"):
        raise ValueError(
            f"Pickle library at {value!r} does not have a 'dumps' method."
        )

    if not hasattr(pickler, "loads"):
        raise ValueError(
            f"Pickle library at {value!r} does not have a 'loads' method."
        )

    return value
83002be7b3d6ec51edcb8252484e52d918c514c1
12
serializers.py
125
Begin sketch of deployment/packager/manifest relationship
11,574
0
224
65
41
56,867
60
prefect
10
src/prefect/packaging/serializers.py
Python
16
{ "docstring": "\n Check that the given pickle library is importable and has dumps/loads methods.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
https://github.com/PrefectHQ/prefect.git
4
dates
def dates(self, field_name, kind, order="ASC"):
    if kind not in ("year", "month", "week", "day"):
        raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.")
    if order not in ("ASC", "DESC"):
        raise ValueError("'order' must be either 'ASC' or 'DESC'.")
    return (
        self.annotate(
            datefield=Trunc(field_name, kind, output_field=DateField()),
            plain_field=F(field_name),
        )
        .values_list("datefield", flat=True)
        .distinct()
        .filter(plain_field__isnull=False)
        .order_by(("-" if order == "DESC" else "") + "datefield")
    )


# RemovedInDjango50Warning: when the deprecation ends, remove is_dst
# argument.
9c19aff7c7561e3a82978a272ecdaad40dda5c00
22
query.py
197
Refs #33476 -- Reformatted code with Black.
51,213
0
229
113
58
205,782
70
django
19
django/db/models/query.py
Python
15
{ "docstring": "\n Return a list of date objects representing all available dates for\n the given field_name, scoped to 'kind'.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 17 }
https://github.com/django/django.git
62
commentArgs
def commentArgs():
    # A ton of cases to consider, pylint: disable=too-many-branches,too-many-statements

    # Inform the user about potential issues with the running version. e.g. unsupported
    # version.
    if python_version_str not in getSupportedPythonVersions():
        # Do not disturb run of automatic tests with, detected from the presence of
        # that environment variable.
        if "PYTHON" not in os.environ:
            Tracing.general.warning(
                "The version %r is not currently supported. Expect problems."
                % python_version_str,
            )

    default_reference_mode = (
        "runtime" if shallMakeModule() or isStandaloneMode() else "original"
    )

    if getFileReferenceMode() is None:
        options.file_reference_mode = default_reference_mode
    else:
        if options.file_reference_mode != default_reference_mode:
            Tracing.options_logger.warning(
                "Using non-default file reference mode '%s' rather than '%s' may cause runtime issues."
                % (getFileReferenceMode(), default_reference_mode)
            )
        else:
            Tracing.options_logger.info(
                "Using default file reference mode '%s' need not be specified."
                % default_reference_mode
            )

    # TODO: Not all of these are usable with MSYS2 really, split those off.
    if getOS() != "Windows":
        # Too many Windows specific options clearly, pylint: disable=too-many-boolean-expressions
        if (
            getWindowsIconExecutablePath()
            or shallAskForWindowsAdminRights()
            or shallAskForWindowsUIAccessRights()
            or getWindowsCompanyName()
            or getWindowsProductName()
            or getWindowsProductVersion()
            or getWindowsFileVersion()
            or getForcedStderrPath()  # not yet for other platforms
            or getForcedStdoutPath()
            or getWindowsSplashScreen()
            or getIntendedPythonArch()
        ):
            Tracing.options_logger.warning(
                "Using Windows specific options has no effect on other platforms."
            )

        if options.mingw64 or options.msvc_version:
            Tracing.options_logger.warning(
                "Requesting Windows specific compilers has no effect on other platforms."
            )

    if isMingw64() and getMsvcVersion():
        Tracing.options_logger.sysexit(
            "Requesting both Windows specific compilers makes no sense."
        )

    if isOnefileMode():
        standalone_mode = "onefile"
    elif isStandaloneMode():
        standalone_mode = "standalone"
    else:
        standalone_mode = None

    if standalone_mode and not hasStandaloneSupportedOS():
        Tracing.options_logger.warning(
            "Standalone mode on %s is not known to be supported, might fail to work."
            % getOS()
        )

    if options.follow_all and standalone_mode:
        Tracing.options_logger.info(
            "Following all imports is the default for %s mode and need not be specified."
            % standalone_mode
        )

    if options.follow_none and standalone_mode:
        Tracing.options_logger.warning(
            "Following no imports is unlikely to work for %s mode and should not be specified."
            % standalone_mode
        )

    if options.follow_stdlib and not standalone_mode:
        Tracing.options_logger.warning(
            "Following imports to stdlib is unlikely to work without --standalone/--onefile and should not be specified."
        )

    if (
        not shallDumpBuiltTreeXML()
        and not standalone_mode
        and not options.follow_all
        and not options.follow_none
        and not options.follow_modules
        and not options.follow_stdlib
        and not options.include_modules
        and not options.include_packages
        and not options.include_extra
        and not options.follow_not_modules
    ):
        Tracing.options_logger.warning(
            """You did not specify to follow or include anything but main %s. Check options and \
make sure that is intended."""
            % ("module" if shallMakeModule() else "program")
        )

    if options.dependency_tool:
        Tracing.options_logger.warning(
            "Using removed option '--windows-dependency-tool' is deprecated and has no impact anymore."
        )

    if shallMakeModule() and options.static_libpython == "yes":
        Tracing.options_logger.warning(
            "In module mode, providing '--static-libpython' has no effect, it's not used."
        )

        options.static_libpython = "no"

    if (
        not isPgoMode()
        and not isPythonPgoMode()
        and (getPgoArgs() or getPgoExecutable())
    ):
        Tracing.optimization_logger.warning(
            "Providing PGO arguments without enabling PGO mode has no effect."
        )

    if isPgoMode():
        if isStandaloneMode():
            Tracing.optimization_logger.warning(
                "Using PGO with standalone/onefile mode is not currently working. Expect errors."
            )

        if shallMakeModule():
            Tracing.optimization_logger.warning(
                "Using PGO with module mode is not currently working. Expect errors."
            )

    if (
        options.static_libpython == "auto"
        and not shallMakeModule()
        and not shallDumpBuiltTreeXML()
        and not shallUseStaticLibPython()
        and getSystemStaticLibPythonPath() is not None
    ):
        Tracing.options_logger.info(
            """Detected static libpython to exist, consider '--static-libpython=yes' for better performance, \
but errors may happen."""
        )

    if not shallExecuteImmediately():
        if shallRunInDebugger():
            Tracing.options_logger.warning(
                "The '--debugger' option has no effect outside of '--debug' without '--run' option."
            )

        if not shallClearPythonPathEnvironment():
            Tracing.options_logger.warning(
                "The '--execute-with-pythonpath' option has no effect without '--run' option."
            )
a24d709ad2350c081a9a41cd76db72288b8ab014
20
Options.py
941
UI: Added warning when following stdlib in accelerated mode * That's not going to work at this time, so we should inform the user to not use it.
42,687
0
1,656
523
221
178,405
514
Nuitka
57
nuitka/Options.py
Python
136
{ "docstring": "Comment on options, where we know something is not having the intended effect.\n\n :meta private:\n\n You did not specify to follow or include anything but main %s. Check options and \\\nmake sure that is intended.Detected static libpython to exist, consider '--static-libpython=yes' for better performance, \\\nbut errors may happen.", "language": "en", "n_whitespaces": 53, "n_words": 50, "vocab_size": 45 }
https://github.com/Nuitka/Nuitka.git
1
forward
def forward(self, c):
    batch, cond_channels, cond_length = c.shape

    c = self.input_conv(c)
    c = c + self.residual_conv(c)
    k = self.kernel_conv(c)
    b = self.bias_conv(c)

    kernels = k.contiguous().view(batch,
                                  self.conv_layers,
                                  self.conv_in_channels,
                                  self.conv_out_channels,
                                  self.conv_kernel_size,
                                  cond_length)
    bias = b.contiguous().view(batch,
                               self.conv_layers,
                               self.conv_out_channels,
                               cond_length)

    return kernels, bias
0caed984e39c07849a13662894f4cbdbe0a98091
10
modules.py
163
The new vocoder Fre-GAN is now supported (#546) * The new vocoder Fre-GAN is now supported * Improved some fregan details
38,950
0
389
107
27
161,257
39
MockingBird
21
vocoder/fregan/modules.py
Python
17
{ "docstring": "\n Args:\n c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)\n Returns:\n ", "language": "en", "n_whitespaces": 43, "n_words": 10, "vocab_size": 10 }
https://github.com/babysor/MockingBird.git
7
_parse_codestream
def _parse_codestream(fp):
    hdr = fp.read(2)
    lsiz = struct.unpack(">H", hdr)[0]
    siz = hdr + fp.read(lsiz - 2)
    lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, _, _, _, _, csiz = struct.unpack_from(
        ">HHIIIIIIIIH", siz
    )
    ssiz = [None] * csiz
    xrsiz = [None] * csiz
    yrsiz = [None] * csiz
    for i in range(csiz):
        ssiz[i], xrsiz[i], yrsiz[i] = struct.unpack_from(">BBB", siz, 36 + 3 * i)

    size = (xsiz - xosiz, ysiz - yosiz)
    if csiz == 1:
        if (yrsiz[0] & 0x7F) > 8:
            mode = "I;16"
        else:
            mode = "L"
    elif csiz == 2:
        mode = "LA"
    elif csiz == 3:
        mode = "RGB"
    elif csiz == 4:
        mode = "RGBA"
    else:
        mode = None

    return size, mode
ee85e387bab535e2339b9d3cd1ab87c61d23af15
12
Jpeg2KImagePlugin.py
311
Remove redundant parentheses
69,907
0
244
196
67
242,744
115
Pillow
23
src/PIL/Jpeg2KImagePlugin.py
Python
27
{ "docstring": "Parse the JPEG 2000 codestream to extract the size and component\n count from the SIZ marker segment, returning a PIL (size, mode) tuple.", "language": "en", "n_whitespaces": 25, "n_words": 23, "vocab_size": 21 }
https://github.com/python-pillow/Pillow.git
1
_get_postgis_func
def _get_postgis_func(self, func):
    # Close out the connection. See #9437.
    with self.connection.temporary_connection() as cursor:
        cursor.execute("SELECT %s()" % func)
        return cursor.fetchone()[0]
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
operations.py
66
Refs #33476 -- Reformatted code with Black.
50,549
0
64
36
20
203,832
20
django
8
django/contrib/gis/db/backends/postgis/operations.py
Python
4
{ "docstring": "\n Helper routine for calling PostGIS functions and returning their result.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/django/django.git
5
can_publish_subpage
def can_publish_subpage(self):
    if not self.user.is_active:
        return False
    specific_class = self.page.specific_class
    if specific_class is None or not specific_class.creatable_subpage_models():
        return False
    return self.user.is_superuser or ("publish" in self.permissions)
d10f15e55806c6944827d801cd9c2d53f5da4186
9
__init__.py
86
Reformat with black
16,134
0
82
51
18
73,833
25
wagtail
9
wagtail/core/models/__init__.py
Python
7
{ "docstring": "\n Niggly special case for creating and publishing a page in one go.\n Differs from can_publish in that we want to be able to publish subpages of root, but not\n to be able to publish root itself. (Also, can_publish_subpage returns false if the page\n does not allow subpages at all.)\n ", "language": "en", "n_whitespaces": 85, "n_words": 49, "vocab_size": 39 }
https://github.com/wagtail/wagtail.git
1
binary_mask_dice_loss
def binary_mask_dice_loss(self, mask_preds, gt_masks):
    mask_preds = mask_preds.flatten(1)
    gt_masks = gt_masks.flatten(1).float()
    numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks)
    denominator = mask_preds.sum(-1)[:, None] + gt_masks.sum(-1)[None, :]
    loss = 1 - (numerator + self.eps) / (denominator + self.eps)
    return loss
cac356380d505bf15587f07c0529218cc36b9652
11
match_cost.py
146
[Feature] Add Maskformer to mmdet (#7212) * first commit * add README * move model description from config to readme add description for binary_input add description for dice loss add a independent panoptic gt processing function add a independent panoptic gt processing function remove compatibility of pretrain in maskformer * update comments in maskformer_head * update docs format
70,203
0
86
92
28
244,031
37
mmdetection
13
mmdet/core/bbox/match_costs/match_cost.py
Python
7
{ "docstring": "\n Args:\n mask_preds (Tensor): Mask prediction in shape (num_query, *).\n gt_masks (Tensor): Ground truth in shape (num_gt, *)\n store 0 or 1, 0 for negative class and 1 for\n positive class.\n\n Returns:\n Tensor: Dice cost matrix in shape (num_query, num_gt).\n ", "language": "en", "n_whitespaces": 124, "n_words": 39, "vocab_size": 31 }
https://github.com/open-mmlab/mmdetection.git
5
_DropCommonSuffixes
def _DropCommonSuffixes(filename):
    for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
                   'inl.h', 'impl.h', 'internal.h'):
        if (filename.endswith(suffix) and len(filename) > len(suffix) and
                filename[-len(suffix) - 1] in ('-', '_')):
            return filename[:-len(suffix) - 1]
    return os.path.splitext(filename)[0]
cc4d0564756ca067516f71718a3d135996525909
15
cpp_lint.py
143
Balanced joint maximum mean discrepancy for deep transfer learning
12,140
0
64
84
25
60,412
30
transferlearning
8
code/deep/BJMMD/caffe/scripts/cpp_lint.py
Python
7
{ "docstring": "Drops common suffixes like _test.cc or -inl.h from filename.\n\n For example:\n >>> _DropCommonSuffixes('foo/foo-inl.h')\n 'foo/foo'\n >>> _DropCommonSuffixes('foo/bar/foo.cc')\n 'foo/bar/foo'\n >>> _DropCommonSuffixes('foo/foo_internal.h')\n 'foo/foo'\n >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')\n 'foo/foo_unusualinternal'\n\n Args:\n filename: The input filename.\n\n Returns:\n The filename with the common suffix removed.\n ", "language": "en", "n_whitespaces": 70, "n_words": 36, "vocab_size": 29 }
https://github.com/jindongwang/transferlearning.git
2
_text_words
def _text_words(self, length, truncate):
    words = self._wrapped.split()
    if len(words) > length:
        words = words[:length]
        return self.add_truncation_text(" ".join(words), truncate)
    return " ".join(words)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
text.py
92
Refs #33476 -- Reformatted code with Black.
51,669
0
71
55
18
206,737
21
django
10
django/utils/text.py
Python
6
{ "docstring": "\n Truncate a string after a certain number of words.\n\n Strip newlines in the string.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
https://github.com/django/django.git
1
ulid
def ulid() -> str:
    ulid_bytes = int(time.time() * 1000).to_bytes(6, byteorder="big") + int(
        getrandbits(80)
    ).to_bytes(10, byteorder="big")

    # This is base32 crockford encoding with the loop unrolled for performance
    #
    # This code is adapted from:
    # https://github.com/ahawker/ulid/blob/06289583e9de4286b4d80b4ad000d137816502ca/ulid/base32.py#L102
    #
    enc = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
    return (
        enc[(ulid_bytes[0] & 224) >> 5]
        + enc[ulid_bytes[0] & 31]
        + enc[(ulid_bytes[1] & 248) >> 3]
        + enc[((ulid_bytes[1] & 7) << 2) | ((ulid_bytes[2] & 192) >> 6)]
        + enc[((ulid_bytes[2] & 62) >> 1)]
        + enc[((ulid_bytes[2] & 1) << 4) | ((ulid_bytes[3] & 240) >> 4)]
        + enc[((ulid_bytes[3] & 15) << 1) | ((ulid_bytes[4] & 128) >> 7)]
        + enc[(ulid_bytes[4] & 124) >> 2]
        + enc[((ulid_bytes[4] & 3) << 3) | ((ulid_bytes[5] & 224) >> 5)]
        + enc[ulid_bytes[5] & 31]
        + enc[(ulid_bytes[6] & 248) >> 3]
        + enc[((ulid_bytes[6] & 7) << 2) | ((ulid_bytes[7] & 192) >> 6)]
        + enc[(ulid_bytes[7] & 62) >> 1]
        + enc[((ulid_bytes[7] & 1) << 4) | ((ulid_bytes[8] & 240) >> 4)]
        + enc[((ulid_bytes[8] & 15) << 1) | ((ulid_bytes[9] & 128) >> 7)]
        + enc[(ulid_bytes[9] & 124) >> 2]
        + enc[((ulid_bytes[9] & 3) << 3) | ((ulid_bytes[10] & 224) >> 5)]
        + enc[ulid_bytes[10] & 31]
        + enc[(ulid_bytes[11] & 248) >> 3]
        + enc[((ulid_bytes[11] & 7) << 2) | ((ulid_bytes[12] & 192) >> 6)]
        + enc[(ulid_bytes[12] & 62) >> 1]
        + enc[((ulid_bytes[12] & 1) << 4) | ((ulid_bytes[13] & 240) >> 4)]
        + enc[((ulid_bytes[13] & 15) << 1) | ((ulid_bytes[14] & 128) >> 7)]
        + enc[(ulid_bytes[14] & 124) >> 2]
        + enc[((ulid_bytes[14] & 3) << 3) | ((ulid_bytes[15] & 224) >> 5)]
        + enc[ulid_bytes[15] & 31]
    )
2a9f043039dc60fc25bc14bab724419bedf746bb
36
ulid.py
856
Use ULID short format for context ids (#71119)
98,556
0
484
614
99
299,641
262
core
9
homeassistant/util/ulid.py
Python
49
{ "docstring": "Generate a ULID.\n\n This ulid should not be used for cryptographically secure\n operations.\n\n 01AN4Z07BY 79KA1307SR9X4MV3\n |----------| |----------------|\n Timestamp Randomness\n 48bits 80bits\n\n This string can be loaded directly with https://github.com/ahawker/ulid\n\n import homeassistant.util.ulid as ulid_util\n import ulid\n ulid.parse(ulid_util.ulid())\n ", "language": "en", "n_whitespaces": 103, "n_words": 36, "vocab_size": 32 }
https://github.com/home-assistant/core.git
2
_train_with_recompute
def _train_with_recompute(n_steps):
    img_dim, n_channels, batch_size = 256, 1, 4
    x, y = _get_dummy_data(img_dim, n_channels, batch_size)
    # This model is the same model as _get_big_cnn_model but split into 3 parts.
    models = _get_split_cnn_model(
        img_dim, n_channels, num_partitions=3, blocks_per_partition=2
    )
    model1, model2, model3 = models
    # Apply gradient checkpointing to the submodels using tf.recompute_grad.
    model1_re = tf.recompute_grad(model1)
    model2_re = tf.recompute_grad(model2)
    model3_re = tf.recompute_grad(model3)
    optimizer = optimizers.SGD()
    tr_vars = (
        model1.trainable_variables
        + model2.trainable_variables
        + model3.trainable_variables
    )
    losses = []
    for _ in range(n_steps):
        with tf.GradientTape() as tape:
            logits1 = model1_re(x)
            logits2 = model2_re(logits1)
            logits3 = model3_re(logits2)
            loss = _compute_loss(logits3, y)
            losses.append(loss)
        grads = tape.gradient(loss, tr_vars)  # tr_vars
        optimizer.apply_gradients(zip(grads, tr_vars))
        del grads
    return losses


@tf_test_utils.with_eager_op_as_function
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@tf_test_utils.with_eager_op_as_function
13
gradient_checkpoint_test.py
288
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,977
1
284
176
82
272,207
110
keras
42
keras/integration_test/gradient_checkpoint_test.py
Python
28
{ "docstring": "Trains a single large model with gradient checkpointing using tf.recompute_grad.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/keras-team/keras.git
2
test_edit_post
def test_edit_post(self):
    # Send request
    response = self.client.post(
        reverse("wagtaildocs:edit_multiple", args=(self.doc.id,)),
        {
            "doc-%d-%s" % (self.doc.id, field): data
            for field, data in self.edit_post_data.items()
        },
    )

    # Check response
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response["Content-Type"], "application/json")

    # Check JSON
    response_json = json.loads(response.content.decode())
    self.assertIn("doc_id", response_json)
    self.assertNotIn("form", response_json)
    self.assertIn("success", response_json)
    self.assertEqual(response_json["doc_id"], self.doc.id)
    self.assertTrue(response_json["success"])

    self.check_doc_after_edit()
d10f15e55806c6944827d801cd9c2d53f5da4186
14
test_admin_views.py
246
Reformat with black
16,323
0
214
147
38
74,807
46
wagtail
24
wagtail/documents/tests/test_admin_views.py
Python
17
{ "docstring": "\n This tests that a POST request to the edit view edits the document\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
https://github.com/wagtail/wagtail.git
1
clamped
def clamped(self) -> Offset:
    x, y = self
    return Offset(max(x, 0), max(y, 0))
7bca184192191689b8a7247c92392d6b238df3d7
9
geometry.py
48
tweak for colors
44,981
0
34
30
13
185,337
13
textual
6
src/textual/geometry.py
Python
8
{ "docstring": "Ensure x and y are above zero.\n\n Returns:\n Offset: New offset.\n ", "language": "en", "n_whitespaces": 36, "n_words": 11, "vocab_size": 11 }
https://github.com/Textualize/textual.git
1
make_authors_file_lines
def make_authors_file_lines(git_people):
    # define new lines for the file
    header = filldedent().lstrip()
    header_extra = f"There are a total of {len(git_people)} authors."
    lines = header.splitlines()
    lines.append('')
    lines.append(header_extra)
    lines.append('')
    lines.extend(git_people)
    return lines
5373c833ee895fb95f791849c6082ceb698b8dcc
11
mailmap_check.py
108
maint: tweaks in mailmap_check.py
47,395
0
29
53
20
195,742
21
sympy
11
bin/mailmap_check.py
Python
15
{ "docstring": "\n All people who contributed to SymPy by sending at least a patch or\n more (in the order of the date of their first contribution), except\n those who explicitly didn't want to be mentioned. People with a * next\n to their names are not found in the metadata of the git history. This\n file is generated automatically by running `./bin/authors_update.py`.\n \n lines = header.splitlines()\n lines.append('')\n lines.append(header_extra)\n lines.append('')\n lines.extend(git_people)\n return lines\n\n", "language": "en", "n_whitespaces": 129, "n_words": 68, "vocab_size": 55 }
https://github.com/sympy/sympy.git