Column schema for the records below (one row per field: name, dtype, min, max). Each record that follows lists its fields in this order; empty fields (usually ast_errors) are omitted.

    ast_errors      stringlengths   0     3.2k
    d_id            int64           44    121k
    id              int64           70    338k
    n_whitespaces   int64           3     14k
    path            stringlengths   8     134
    n_words         int64           4     4.82k
    n_identifiers   int64           1     131
    random_cut      stringlengths   16    15.8k
    commit_message  stringlengths   2     15.3k
    fun_name        stringlengths   1     84
    commit_id       stringlengths   40    40
    repo            stringlengths   3     28
    file_name       stringlengths   5     79
    ast_levels      int64           6     31
    nloc            int64           1     548
    url             stringlengths   31    59
    complexity      int64           1     66
    token_counts    int64           6     2.13k
    n_ast_errors    int64           0     28
    vocab_size      int64           4     1.11k
    n_ast_nodes     int64           15    19.2k
    language        stringclasses   1 value
    documentation   dict
    code            stringlengths   101   62.2k
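For orientation, a minimal sketch of how a dataset with this schema could be loaded and inspected. The dataset path "org/code-dataset" is a placeholder for illustration, not the real identifier.

    # Hypothetical loading sketch -- the dataset name below is a placeholder.
    from datasets import load_dataset

    ds = load_dataset("org/code-dataset", split="train")  # assumed path
    row = ds[0]
    print(row["repo"], row["fun_name"], row["complexity"])
    print(row["documentation"]["docstring"])  # docstrings are stored apart from code
    print(row["code"])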

Record 1
d_id: 49,578 | id: 200,279 | n_whitespaces: 30
path: sympy/testing/runtests.py
n_words: 15 | n_identifiers: 11
random_cut:
    def get_sympy_dir():
        this_file = os.path.abspath(__file__)
        sympy_dir = os.path.join(os.path.dirname(this_file), ".
commit_message: runtests.py: Undo auto-formatting, re-add changes to blacklist for scipy, numpy
fun_name: get_sympy_dir
commit_id: 6d2bbf80752549276a968fd4af78231c569d55c5
repo: sympy
file_name: runtests.py
ast_levels: 11 | nloc: 5
url: https://github.com/sympy/sympy.git
complexity: 1 | token_counts: 55 | n_ast_errors: 0 | vocab_size: 12 | n_ast_nodes: 93
language: Python
documentation: { "docstring": "\n Returns the root SymPy directory and set the global value\n indicating whether the system is case sensitive or not.\n ", "language": "en", "n_whitespaces": 29, "n_words": 19, "vocab_size": 17 }
code:
    def get_sympy_dir():
        this_file = os.path.abspath(__file__)
        sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
        sympy_dir = os.path.normpath(sympy_dir)
        return os.path.normcase(sympy_dir)

Record 2
d_id: 12,150 | id: 60,422 | n_whitespaces: 215
path: code/deep/BJMMD/caffe/scripts/cpp_lint.py
n_words: 99 | n_identifiers: 13
random_cut:
    def CheckCaffeRandom(filename, clean_lines, linenum, error):
        line = clean_lines.elided[linenum]
        for f
commit_message: Balanced joint maximum mean discrepancy for deep transfer learning
fun_name: CheckCaffeRandom
commit_id: cc4d0564756ca067516f71718a3d135996525909
repo: transferlearning
file_name: cpp_lint.py
ast_levels: 17 | nloc: 10
url: https://github.com/jindongwang/transferlearning.git
complexity: 6 | token_counts: 90 | n_ast_errors: 0 | vocab_size: 86 | n_ast_nodes: 273
language: Python
documentation: { "docstring": "Checks for calls to C random functions (rand, rand_r, random, ...).\n\n Caffe code should (almost) always use the caffe_rng_* functions rather\n than these, as the internal state of these C functions is independent of the\n native Caffe RNG system which should produce deterministic results for a\n fixed Caffe seed set using Caffe::set_random_seed(...).\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n ", "language": "en", "n_whitespaces": 102, "n_words": 84, "vocab_size": 64 }
code:
    def CheckCaffeRandom(filename, clean_lines, linenum, error):
        line = clean_lines.elided[linenum]
        for function in c_random_function_list:
            ix = line.find(function)
            # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
            if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
                                          line[ix - 1] not in ('_', '.', '>'))):
                error(filename, linenum, 'caffe/random_fn', 2,
                      'Use caffe_rng_rand() (or other caffe_rng_* function) instead of '
                      + function +
                      ') to ensure results are deterministic for a fixed Caffe seed.')

    threading_list = (
        ('asctime(', 'asctime_r('),
        ('ctime(', 'ctime_r('),
        ('getgrgid(', 'getgrgid_r('),
        ('getgrnam(', 'getgrnam_r('),
        ('getlogin(', 'getlogin_r('),
        ('getpwnam(', 'getpwnam_r('),
        ('getpwuid(', 'getpwuid_r('),
        ('gmtime(', 'gmtime_r('),
        ('localtime(', 'localtime_r('),
        ('strtok(', 'strtok_r('),
        ('ttyname(', 'ttyname_r('),
    )
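A small self-contained sketch of the boundary test in Record 2's code: a match only counts when the preceding character cannot extend an identifier or qualify a call, so `caffe_rng_rand()` and `gen.rand()` pass while a bare `rand()` is flagged. The function list here is a tiny illustrative subset, not Caffe's real `c_random_function_list`.

    # Illustrative subset of the C random functions the linter looks for.
    c_random_function_list = ('rand(', 'rand_r(', 'random(')

    def flags_line(line):
        """Return the flagged function name, or None if the line is clean."""
        for function in c_random_function_list:
            ix = line.find(function)
            # Only flag when the match starts the line or follows a
            # non-identifier, non-qualifier character.
            if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum()
                                        and line[ix - 1] not in ('_', '.', '>'))):
                return function
        return None

    assert flags_line("int x = rand();") == "rand("
    assert flags_line("caffe_rng_rand();") is None  # suffix of a longer identifier
    assert flags_line("gen.rand();") is None        # qualified call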

Record 3
d_id: 7,678 | id: 42,650 | n_whitespaces: 275
path: tests/jobs/test_backfill_job.py
n_words: 89 | n_identifiers: 35
random_cut:
    def test_mapped_dag(self, dag_id, executor_name, session):
        # This test needs a real executor to run, so that the `make_list` task can write out the TaskMap
        from airflow.executors.executor_loader import ExecutorLoader

        self.dagbag.process_file(str(TEST_DAGS_FOLDER / f'{dag_id}.py'))
        dag = self.dagbag.get_dag(dag_id)

        when = datetime.datetime(2022, 1, 1)

        job = BackfillJob(
            dag=dag,
            start_date=when,
            end_date=when,
            donot_pickle=True,
            executor=ExecutorLoader.load_executor(executor_name),
        )
        job.run()

        dr = DagRun.find(dag_id=dag.dag_id, execution_date=when, session=session)[0]
        assert dr
        assert dr.state == DagRunState.SUCCESS

        # Check that every task has a start and end date
        for ti in dr.task_instances:
            assert ti.state == TaskInstanceState.SUCCESS
            assert ti.start_date is not None
            assert ti.end_date is not Non
commit_message:
    Replaced all days_ago functions with datetime functions (#23237)
    Co-authored-by: Dev232001 <thedevhooda@gmail.com>
fun_name: test_mapped_dag
commit_id: f352ee63a5d09546a7997ba8f2f8702a1ddb4af7
repo: airflow
file_name: test_backfill_job.py
ast_levels: 12 | nloc: 20
url: https://github.com/apache/airflow.git
complexity: 2 | token_counts: 153 | n_ast_errors: 0 | vocab_size: 72 | n_ast_nodes: 233
language: Python
documentation: { "docstring": "\n End-to-end test of a simple mapped dag.\n\n We test with multiple executors as they have different \"execution environments\" -- for instance\n DebugExecutor runs a lot more in the same process than other Executors.\n\n ", "language": "en", "n_whitespaces": 62, "n_words": 33, "vocab_size": 31 }
code:
    def test_mapped_dag(self, dag_id, executor_name, session):
        # This test needs a real executor to run, so that the `make_list` task can write out the TaskMap
        from airflow.executors.executor_loader import ExecutorLoader

        self.dagbag.process_file(str(TEST_DAGS_FOLDER / f'{dag_id}.py'))
        dag = self.dagbag.get_dag(dag_id)

        when = datetime.datetime(2022, 1, 1)

        job = BackfillJob(
            dag=dag,
            start_date=when,
            end_date=when,
            donot_pickle=True,
            executor=ExecutorLoader.load_executor(executor_name),
        )
        job.run()

        dr = DagRun.find(dag_id=dag.dag_id, execution_date=when, session=session)[0]
        assert dr
        assert dr.state == DagRunState.SUCCESS

        # Check that every task has a start and end date
        for ti in dr.task_instances:
            assert ti.state == TaskInstanceState.SUCCESS
            assert ti.start_date is not None
            assert ti.end_date is not None

Record 4
d_id: 91,370 | id: 292,272 | n_whitespaces: 203
path: tests/components/http/test_init.py
n_words: 69 | n_identifiers: 16
random_cut:
    async def test_emergency_ssl_certificate_when_invalid(hass, tmpdir, caplog):
        cert_path, key_path = await hass.async_add_executor_job(
            _setup_broken_ssl_pem_files, tmpdir
        )
        hass.config.safe_mode = True
        assert (
            await async_setup_component(
                hass,
                "http",
                {
                    "http": {"ssl_certificate": cert_path, "ssl_key": key_path},
                },
            )
            is True
        )
        await hass.async_start()
        await hass.async_block_till_done()
        assert (
            "Home Assistant is running in safe mode with an emergency self signed ssl certificate because the configured SSL certificate w
commit_message: Startup with an emergency self signed cert if the ssl certificate cannot be loaded (#66707)
fun_name: test_emergency_ssl_certificate_when_invalid
commit_id: 3bf2be1765f7a33fbce06cbabeb2e2115f2f07c7
repo: core
file_name: test_init.py
ast_levels: 15 | nloc: 22
url: https://github.com/home-assistant/core.git
complexity: 1 | token_counts: 87 | n_ast_errors: 0 | vocab_size: 52 | n_ast_nodes: 145
language: Python
documentation: { "docstring": "Test http can startup with an emergency self signed cert when the current one is broken.", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 16 }
code:
    async def test_emergency_ssl_certificate_when_invalid(hass, tmpdir, caplog):
        cert_path, key_path = await hass.async_add_executor_job(
            _setup_broken_ssl_pem_files, tmpdir
        )
        hass.config.safe_mode = True
        assert (
            await async_setup_component(
                hass,
                "http",
                {
                    "http": {"ssl_certificate": cert_path, "ssl_key": key_path},
                },
            )
            is True
        )
        await hass.async_start()
        await hass.async_block_till_done()
        assert (
            "Home Assistant is running in safe mode with an emergency self signed ssl certificate because the configured SSL certificate was not usable"
            in caplog.text
        )
        assert hass.http.site is not None

Record 5
d_id: 51,721 | id: 206,811 | n_whitespaces: 50
path: django/views/debug.py
n_words: 18 | n_identifiers: 9
random_cut:
    def get_safe_request_meta(self, request):
        if not hasattr(request, "META"):
            return {}
        return {k: self.cleanse_setting(k, v) for k, v in request.M
commit_message: Refs #33476 -- Reformatted code with Black.
fun_name: get_safe_request_meta
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00
repo: django
file_name: debug.py
ast_levels: 10 | nloc: 4
url: https://github.com/django/django.git
complexity: 3 | token_counts: 45 | n_ast_errors: 0 | vocab_size: 17 | n_ast_nodes: 73
language: Python
documentation: { "docstring": "\n Return a dictionary of request.META with sensitive values redacted.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
code:
    def get_safe_request_meta(self, request):
        if not hasattr(request, "META"):
            return {}
        return {k: self.cleanse_setting(k, v) for k, v in request.META.items()}

Record 6
d_id: 70,802 | id: 245,466 | n_whitespaces: 23
path: mmdet/models/data_preprocessors/data_preprocessor.py
n_words: 9 | n_identifiers: 7
random_cut:
    def cuda(self, *args, **kwargs) -> nn.Module:
        return self.data_preprocessor.cuda(*args, **
commit_message:
    [Feature] Support MultiDataPreprocessor (#8495)
    * Support MultiDataPreprocessor
    * Fix some commits
    * Fix a bug
    * Inherit from the BaseDataPreprocessor
fun_name: cuda
commit_id: b564ad32895ac4c2c0a18ba0e32c8c5ccb593df4
repo: mmdetection
file_name: data_preprocessor.py
ast_levels: 8 | nloc: 7
url: https://github.com/open-mmlab/mmdetection.git
complexity: 1 | token_counts: 29 | n_ast_errors: 0 | vocab_size: 8 | n_ast_nodes: 47
language: Python
documentation: { "docstring": "Overrides this method to set the :attr:`device`\n\n Returns:\n nn.Module: The model itself.\n ", "language": "en", "n_whitespaces": 37, "n_words": 12, "vocab_size": 12 }
code:
    def cuda(self, *args, **kwargs) -> nn.Module:
        return self.data_preprocessor.cuda(*args, **kwargs)

Record 7
d_id: 16,023 | id: 73,478 | n_whitespaces: 19
path: wagtail/contrib/settings/models.py
n_words: 5 | n_identifiers: 7
random_cut:
    def get_cache_attr_name(cls):
        return "_{}.{}".format(cls._meta.app_label, cl
commit_message: Reformat with black
fun_name: get_cache_attr_name
commit_id: d10f15e55806c6944827d801cd9c2d53f5da4186
repo: wagtail
file_name: models.py
ast_levels: 11 | nloc: 2
url: https://github.com/wagtail/wagtail.git
complexity: 1 | token_counts: 27 | n_ast_errors: 0 | vocab_size: 5 | n_ast_nodes: 47
language: Python
documentation: { "docstring": "\n Returns the name of the attribute that should be used to store\n a reference to the fetched/created object on a request.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 17 }
code:
    def get_cache_attr_name(cls):
        return "_{}.{}".format(cls._meta.app_label, cls._meta.model_name).lower()

Record 8
d_id: 5,191 | id: 29,046 | n_whitespaces: 485
path: saleor/graphql/product/mutations/products.py
n_words: 77 | n_identifiers: 25
random_cut:
    def get_instance(cls, info, **data):
        object_id = data.get("id")
        object_sku = data.get
commit_message:
    Allow to update/delete product variant by providing SKU (#10861)
    * Allow to update/delete product variants by providing SKU
    * Review changes
    * Add SKU argument to ProductVariantStocksUpdate/Delete mutations
    * Review fixes
    * CHANGELOG.md update
    * Code readability improvement
fun_name: get_instance
commit_id: 0b46c89dfd9e5e22defb45cbd9869403a7817320
repo: saleor
file_name: products.py
ast_levels: 18 | nloc: 29
url: https://github.com/saleor/saleor.git
complexity: 5 | token_counts: 140 | n_ast_errors: 0 | vocab_size: 58 | n_ast_nodes: 242
language: Python
documentation: { "docstring": "Prefetch related fields that are needed to process the mutation.\n\n If we are updating an instance and want to update its attributes,\n # prefetch them.\n ", "language": "en", "n_whitespaces": 46, "n_words": 25, "vocab_size": 23 }
code:
    def get_instance(cls, info, **data):
        object_id = data.get("id")
        object_sku = data.get("sku")
        attributes = data.get("attributes")

        if attributes:
            # Prefetches needed by AttributeAssignmentMixin and
            # associate_attribute_values_to_instance
            qs = cls.Meta.model.objects.prefetch_related(
                "product__product_type__variant_attributes__values",
                "product__product_type__attributevariant",
            )
        else:
            # Use the default queryset.
            qs = models.ProductVariant.objects.all()

        if object_id:
            return cls.get_node_or_error(
                info, object_id, only_type="ProductVariant", qs=qs
            )
        elif object_sku:
            instance = qs.filter(sku=object_sku).first()
            if not instance:
                raise ValidationError(
                    {
                        "sku": ValidationError(
                            "Couldn't resolve to a node: %s" % object_sku,
                            code="not_found",
                        )
                    }
                )
            return instance
        else:
            return cls._meta.model()

Record 9
d_id: 39,495 | id: 163,773 | n_whitespaces: 71
path: pandas/core/indexes/base.py
n_words: 22 | n_identifiers: 9
random_cut:
    def _can_use_libjoin(self) -> bool:
        if type(self) is Index:
            # excludes EAs
            return isinstance(self.dtype, np.dtype)
        return not is_interval_dtype(self.dtype)
commit_message: ENH: ExtensionEngine (#45514)
fun_name: _can_use_libjoin
commit_id: 4248b23371a70b339a2c16b8e5caca9c2e5897f8
repo: pandas
file_name: base.py
ast_levels: 10 | nloc: 7
url: https://github.com/pandas-dev/pandas.git
complexity: 2 | token_counts: 35 | n_ast_errors: 0 | vocab_size: 19 | n_ast_nodes: 61
language: Python
documentation: { "docstring": "\n Whether we can use the fastpaths implement in _libs.join\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
code:
    def _can_use_libjoin(self) -> bool:
        if type(self) is Index:
            # excludes EAs
            return isinstance(self.dtype, np.dtype)
        return not is_interval_dtype(self.dtype)

    # --------------------------------------------------------------------
    # Uncategorized Methods

Record 10
d_id: 47,589 | id: 196,089 | n_whitespaces: 113
path: sympy/combinatorics/free_groups.py
n_words: 34 | n_identifiers: 13
random_cut:
    def sub_syllables(self, from_i, to_j):
        if not isinstance(from_i, int) or not isinstance(to_j, int):
            raise ValueError("both arguments should be integers")
        group = self.group
        if to_j <= from_i:
            return group.identity
        else:
            r = tuple(self.array_form[from_i: t
commit_message: Updated import locations
fun_name: sub_syllables
commit_id: 498015021131af4dbb07eb110e5badaba8250c7b
repo: sympy
file_name: free_groups.py
ast_levels: 13 | nloc: 9
url: https://github.com/sympy/sympy.git
complexity: 4 | token_counts: 68 | n_ast_errors: 0 | vocab_size: 30 | n_ast_nodes: 110
language: Python
documentation: { "docstring": "\n `sub_syllables` returns the subword of the associative word `self` that\n consists of syllables from positions `from_to` to `to_j`, where\n `from_to` and `to_j` must be positive integers and indexing is done\n with origin 0.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import free_group\n >>> f, a, b = free_group(\"a, b\")\n >>> w = a**5*b*a**2*b**-4*a\n >>> w.sub_syllables(1, 2)\n b\n >>> w.sub_syllables(3, 3)\n <identity>\n\n ", "language": "en", "n_whitespaces": 158, "n_words": 59, "vocab_size": 48 }
code:
    def sub_syllables(self, from_i, to_j):
        if not isinstance(from_i, int) or not isinstance(to_j, int):
            raise ValueError("both arguments should be integers")
        group = self.group
        if to_j <= from_i:
            return group.identity
        else:
            r = tuple(self.array_form[from_i: to_j])
            return group.dtype(r)

Record 11
ast_errors: @set_module('numpy')
d_id: 38,760 | id: 160,855 | n_whitespaces: 180
path: numpy/core/_ufunc_config.py
n_words: 72 | n_identifiers: 19
random_cut:
    def seterr(all=None, divide=None, over=None, under=None, invalid=None):
        pyvals = umath.geterrobj()
        old = geterr()

        if divide is None:
            divide = all or old['divide']
        if over is None:
            over = all or old['over']
        if under is None:
            under = all or old['under']
        if i
commit_message: DOC: Fixup docs for improved scalar floating point warning message
fun_name: seterr
commit_id: 2223a09864e4ccf5206b78684d3db5c853336df9
repo: numpy
file_name: _ufunc_config.py
ast_levels: 13 | nloc: 18
url: https://github.com/numpy/numpy.git
complexity: 9 | token_counts: 145 | n_ast_errors: 1 | vocab_size: 39 | n_ast_nodes: 235
language: Python
documentation: { "docstring": "\n Set how floating-point errors are handled.\n\n Note that operations on integer scalar types (such as `int16`) are\n handled like floating point, and are affected by these settings.\n\n Parameters\n ----------\n all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Set treatment for all types of floating-point errors at once:\n\n - ignore: Take no action when the exception occurs.\n - warn: Print a `RuntimeWarning` (via the Python `warnings` module).\n - raise: Raise a `FloatingPointError`.\n - call: Call a function specified using the `seterrcall` function.\n - print: Print a warning directly to ``stdout``.\n - log: Record error in a Log object specified by `seterrcall`.\n\n The default is not to change the current behavior.\n divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Treatment for division by zero.\n over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Treatment for floating-point overflow.\n under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Treatment for floating-point underflow.\n invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Treatment for invalid floating-point operation.\n\n Returns\n -------\n old_settings : dict\n Dictionary containing the old settings.\n\n See also\n --------\n seterrcall : Set a callback function for the 'call' mode.\n geterr, geterrcall, errstate\n\n Notes\n -----\n The floating-point exceptions are defined in the IEEE 754 standard [1]_:\n\n - Division by zero: infinite result obtained from finite numbers.\n - Overflow: result too large to be expressed.\n - Underflow: result so close to zero that some precision\n was lost.\n - Invalid operation: result is not an expressible number, typically\n indicates that a NaN was produced.\n\n .. [1] https://en.wikipedia.org/wiki/IEEE_754\n\n Examples\n --------\n >>> old_settings = np.seterr(all='ignore')  #seterr to known value\n >>> np.seterr(over='raise')\n {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}\n >>> np.seterr(**old_settings)  # reset to default\n {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'}\n\n >>> np.int16(32000) * np.int16(3)\n 30464\n >>> old_settings = np.seterr(all='warn', over='raise')\n >>> np.int16(32000) * np.int16(3)\n Traceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n FloatingPointError: overflow encountered in scalar multiply\n\n >>> old_settings = np.seterr(all='print')\n >>> np.geterr()\n {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}\n >>> np.int16(32000) * np.int16(3)\n 30464\n\n ", "language": "en", "n_whitespaces": 577, "n_words": 336, "vocab_size": 195 }
code:
    def seterr(all=None, divide=None, over=None, under=None, invalid=None):
        pyvals = umath.geterrobj()
        old = geterr()

        if divide is None:
            divide = all or old['divide']
        if over is None:
            over = all or old['over']
        if under is None:
            under = all or old['under']
        if invalid is None:
            invalid = all or old['invalid']

        maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
                     (_errdict[over] << SHIFT_OVERFLOW) +
                     (_errdict[under] << SHIFT_UNDERFLOW) +
                     (_errdict[invalid] << SHIFT_INVALID))

        pyvals[1] = maskvalue
        umath.seterrobj(pyvals)
        return old


    @set_module('numpy')

Record 12
d_id: 3,339 | id: 20,351 | n_whitespaces: 677
path: pipenv/patched/notpip/_vendor/pygments/formatters/img.py
n_words: 144 | n_identifiers: 37
random_cut:
    def _create_drawables(self, tokensource):
        lineno = charno = maxcharno = 0
        maxlinelength = linelength = 0
        for ttype, value in tokensource:
            while ttype not in self.styles:
                ttype = ttype.parent
            style = self.styles[ttype]
            # TODO: make sure tab expansion happens earlier in the chain.  It
            # really ought to be done on the input, as to do it right here is
            # quite complex.
            value = value.expandtabs(4)
            lines = value.splitlines(True)
            # print lines
            for i, line in enumerate(lines):
                temp = line.rstrip('\n')
                if temp:
                    self._draw_text(
                        self._get_text_pos(linelength, lineno),
                        temp,
                        font = self._get_style_font(style),
                        text_fg = self._get_text_color(style),
                        text_bg
commit_message:
    check point progress on only bringing in pip==22.0.4 (#4966)
    * vendor in pip==22.0.4
    * updating vendor packaging version
    * update pipdeptree to fix pipenv graph with new version of pip.
    * Vendoring of pip-shims 0.7.0
    * Vendoring of requirementslib 1.6.3
    * Update pip index safety restrictions patch for pip==22.0.4
    * Update patches
    * exclude pyptoject.toml from black to see if that helps.
    * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
fun_name: _create_drawables
commit_id: f3166e673fe8d40277b804d35d77dcdb760fc3b3
repo: pipenv
file_name: img.py
ast_levels: 16 | nloc: 31
url: https://github.com/pypa/pipenv.git
complexity: 6 | token_counts: 197 | n_ast_errors: 0 | vocab_size: 89 | n_ast_nodes: 318
language: Python
documentation: { "docstring": "\n Create drawables for the token content.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
code:
    def _create_drawables(self, tokensource):
        lineno = charno = maxcharno = 0
        maxlinelength = linelength = 0
        for ttype, value in tokensource:
            while ttype not in self.styles:
                ttype = ttype.parent
            style = self.styles[ttype]
            # TODO: make sure tab expansion happens earlier in the chain.  It
            # really ought to be done on the input, as to do it right here is
            # quite complex.
            value = value.expandtabs(4)
            lines = value.splitlines(True)
            # print lines
            for i, line in enumerate(lines):
                temp = line.rstrip('\n')
                if temp:
                    self._draw_text(
                        self._get_text_pos(linelength, lineno),
                        temp,
                        font = self._get_style_font(style),
                        text_fg = self._get_text_color(style),
                        text_bg = self._get_text_bg_color(style),
                    )
                    temp_width, temp_hight = self.fonts.get_text_size(temp)
                    linelength += temp_width
                    maxlinelength = max(maxlinelength, linelength)
                    charno += len(temp)
                    maxcharno = max(maxcharno, charno)
                if line.endswith('\n'):
                    # add a line for each extra line in the value
                    linelength = 0
                    charno = 0
                    lineno += 1
        self.maxlinelength = maxlinelength
        self.maxcharno = maxcharno
        self.maxlineno = lineno

Record 13
d_id: 89,156 | id: 290,030 | n_whitespaces: 57
path: homeassistant/util/dt.py
n_words: 26 | n_identifiers: 12
random_cut:
    def __monotonic_time_coarse() -> float:
        return time.clock_gett
commit_message: Significantly reduce clock_gettime syscalls on platforms with broken vdso (#81257)
fun_name: __monotonic_time_coarse
commit_id: 1589c06203c0bc9f87adcc97fe34d5c52aaf403a
repo: core
file_name: dt.py
ast_levels: 13 | nloc: 12
url: https://github.com/home-assistant/core.git
complexity: 1 | token_counts: 14 | n_ast_errors: 0 | vocab_size: 24 | n_ast_nodes: 96
language: Python
documentation: { "docstring": "Return a monotonic time in seconds.\n\n This is the coarse version of time_monotonic, which is faster but less accurate.\n\n Since many arm64 and 32-bit platforms don't support VDSO with time.monotonic\n because of errata, we can't rely on the kernel to provide a fast\n monotonic time.\n\n https://lore.kernel.org/lkml/20170404171826.25030-1-marc.zyngier@arm.com/\n ", "language": "en", "n_whitespaces": 64, "n_words": 46, "vocab_size": 41 }
code:
    def __monotonic_time_coarse() -> float:
        return time.clock_gettime(CLOCK_MONOTONIC_COARSE)


    monotonic_time_coarse = time.monotonic
    with suppress(Exception):
        if (
            platform.system() == "Linux"
            and abs(time.monotonic() - __monotonic_time_coarse()) < 1
        ):
            monotonic_time_coarse = __monotonic_time_coarse
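A minimal standalone sketch of the same fallback pattern from Record 13, assuming Python 3.3+ on Linux where `time.CLOCK_MONOTONIC_COARSE` is exposed; the broad exception guard and the sanity comparison against `time.monotonic()` mirror the record's code.

    import platform
    import time
    from contextlib import suppress

    monotonic_coarse = time.monotonic  # safe default on every platform
    with suppress(Exception):
        if platform.system() == "Linux":
            CLOCK_MONOTONIC_COARSE = time.CLOCK_MONOTONIC_COARSE  # Linux-only attribute

            def monotonic_coarse() -> float:
                # Coarse clock: cheaper reads, lower resolution.
                return time.clock_gettime(CLOCK_MONOTONIC_COARSE)

            # Reject the coarse clock if it disagrees wildly with time.monotonic().
            if abs(time.monotonic() - monotonic_coarse()) >= 1:
                monotonic_coarse = time.monotonic

    print(monotonic_coarse())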

Record 14
d_id: 5,803 | id: 31,789 | n_whitespaces: 487
path: tests/test_feature_extraction_common.py
n_words: 129 | n_identifiers: 32
random_cut:
    def prepare_image_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(feature_ex
commit_message:
    Compute min_resolution in prepare_image_inputs (#17915)
    Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
fun_name: prepare_image_inputs
commit_id: 6aae59d0b54f04c13a79f80b708622db8e8a17e4
repo: transformers
file_name: test_feature_extraction_common.py
ast_levels: 17 | nloc: 31
url: https://github.com/huggingface/transformers.git
complexity: 11 | token_counts: 226 | n_ast_errors: 0 | vocab_size: 87 | n_ast_nodes: 344
language: Python
documentation: { "docstring": "This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,\n or a list of PyTorch tensors if one specifies torchify=True.\n ", "language": "en", "n_whitespaces": 34, "n_words": 28, "vocab_size": 18 }
code:
    def prepare_image_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(feature_extract_tester.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255,
                        size=(
                            feature_extract_tester.num_channels,
                            feature_extract_tester.max_resolution,
                            feature_extract_tester.max_resolution,
                        ),
                        dtype=np.uint8,
                    )
                )
        else:
            image_inputs = []

            # To avoid getting image width/height 0
            min_resolution = feature_extract_tester.min_resolution
            if getattr(feature_extract_tester, "size_divisor", None):
                # If `size_divisor` is defined, the image needs to have width/size >= `size_divisor`
                min_resolution = max(feature_extract_tester.size_divisor, min_resolution)

            for i in range(feature_extract_tester.batch_size):
                width, height = np.random.choice(np.arange(min_resolution, feature_extract_tester.max_resolution), 2)
                image_inputs.append(
                    np.random.randint(255, size=(feature_extract_tester.num_channels, width, height), dtype=np.uint8)
                )

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs

Record 15
d_id: 81,356 | id: 275,266 | n_whitespaces: 22
path: keras/optimizers/optimizer_experimental/optimizer.py
n_words: 8 | n_identifiers: 6
random_cut:
    def _update_step_xla(self, gradient, variable, key):
        return self._update_step(gradient,
commit_message:
    Reformatting the codebase with black.
    PiperOrigin-RevId: 450093126
fun_name: _update_step_xla
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
repo: keras
file_name: optimizer.py
ast_levels: 7 | nloc: 2
url: https://github.com/keras-team/keras.git
complexity: 1 | token_counts: 21 | n_ast_errors: 0 | vocab_size: 8 | n_ast_nodes: 32
language: Python
documentation: { "docstring": "A wrapper of `update_step` to enable XLA acceleration.\n\n Due to `tf.function` tracing mechanism, for (gradient, variable) pairs of\n the same shape and dtype, the execution graph always invoke the first\n pair it has seen. Thus, we need a `key` argument to make each\n (gradient, variable) pair unique. In additions, XLA cannot understand\n string input, so the key is an integer.\n\n Args:\n gradient: backpropagated gradient of the given variable.\n variable: variable whose value needs to be updated.\n key (int): a unique key that identifies the variable.\n\n Returns:\n An `Operation` that applies the specified gradients.\n ", "language": "en", "n_whitespaces": 185, "n_words": 93, "vocab_size": 73 }
code:
    def _update_step_xla(self, gradient, variable, key):
        return self._update_step(gradient, variable)

Record 16
d_id: 21,557 | id: 102,626 | n_whitespaces: 104
path: chia/rpc/wallet_rpc_api.py
n_words: 33 | n_identifiers: 9
random_cut:
    async def log_in(self, request):
        fingerprint = request["fingerprint"]
        if self.service.logged_in_fingerprint == fingerprint:
            return {"fingerpri
commit_message:
    Merge standalone wallet into main (#9793)
    * wallet changes from pac
    * cat changes
    * pool tests
    * pooling tests passing
    * offers
    * lint
    * mempool_mode
    * black
    * linting
    * workflow files
    * flake8
    * more cleanup
    * renamed
    * remove obsolete test, don't cast announcement
    * memos are not only bytes32
    * trade renames
    * fix rpcs, block_record
    * wallet rpc, recompile settlement clvm
    * key derivation
    * clvm tests
    * lgtm issues and wallet peers
    * stash
    * rename
    * mypy linting
    * flake8
    * bad initializer
    * flaky tests
    * Make CAT wallets only create on verified hints (#9651)
    * fix clvm tests
    * return to log lvl warn
    * check puzzle unhardened
    * public key, not bytes. api caching change
    * precommit changes
    * remove unused import
    * mypy ci file, tests
    * ensure balance before creating a tx
    * Remove CAT logic from full node test (#9741)
    * Add confirmations and sleeps for wallet (#9742)
    * use pool executor
    * rever merge mistakes/cleanup
    * Fix trade test flakiness (#9751)
    * remove precommit
    * older version of black
    * lint only in super linter
    * Make announcements in RPC be objects instead of bytes (#9752)
    * Make announcements in RPC be objects instead of bytes
    * Lint
    * misc hint'ish cleanup (#9753)
    * misc hint'ish cleanup
    * unremove some ci bits
    * Use main cached_bls.py
    * Fix bad merge in main_pac (#9774)
    * Fix bad merge at 71da0487b9cd5564453ec24b76f1ac773c272b75
    * Remove unused ignores
    * more unused ignores
    * Fix bad merge at 3b143e705057d6c14e2fb3e00078aceff0552d7e
    * One more byte32.from_hexstr
    * Remove obsolete test
    * remove commented out
    * remove duplicate payment object
    * remove long sync
    * remove unused test, noise
    * memos type
    * bytes32
    * make it clear it's a single state at a time
    * copy over asset ids from pacr
    * file endl linter
    * Update chia/server/ws_connection.py
    Co-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com>
    Co-authored-by: Matt Hauff <quexington@gmail.com>
    Co-authored-by: Kyle Altendorf <sda@fstab.net>
    Co-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com>
fun_name: log_in
commit_id: 89f15f591cc3cc3e8ae40e95ffc802f7f2561ece
repo: chia-blockchain
file_name: wallet_rpc_api.py
ast_levels: 10 | nloc: 9
url: https://github.com/Chia-Network/chia-blockchain.git
complexity: 3 | token_counts: 67 | n_ast_errors: 0 | vocab_size: 25 | n_ast_nodes: 120
language: Python
documentation: { "docstring": "\n Logs in the wallet with a specific key.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
code:
    async def log_in(self, request):
        fingerprint = request["fingerprint"]
        if self.service.logged_in_fingerprint == fingerprint:
            return {"fingerprint": fingerprint}

        await self._stop_wallet()
        started = await self.service._start(fingerprint)
        if started is True:
            return {"fingerprint": fingerprint}

        return {"success": False, "error": "Unknown Error"}

Record 17
d_id: 17,927 | id: 85,091 | n_whitespaces: 56
path: zerver/webhooks/bitbucket2/tests.py
n_words: 18 | n_identifiers: 6
random_cut:
    def test_bitbucket2_on_push_commits_multiple_committers_with_others(self) -> None:
        commit_info = "* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-na
commit_message:
    webhooks: Pick a more reasonable length for short sha.
    7 characters are not enough for large projects, so we change it to reasonably longer. As an example, The Linux kernel needs at least 11 characters of sha in its shortened form to identify a revision. We pick 11 so it should work for most of the projects.
    Signed-off-by: Zixuan James Li <p359101898@gmail.com>
fun_name: test_bitbucket2_on_push_commits_multiple_committers_with_others
commit_id: 4e4689949438735622bdf669f05d218c671e7e01
repo: zulip
file_name: tests.py
ast_levels: 9 | nloc: 6
url: https://github.com/zulip/zulip.git
complexity: 1 | token_counts: 24 | n_ast_errors: 0 | vocab_size: 16 | n_ast_nodes: 52
language: Python
documentation: { "docstring": "Tomasz [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 10 commits to branch master. Commits by Tomasz (4), James (3), Brendon (2) and others (1).\\n\\n{commit_info*9}* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))", "language": "en", "n_whitespaces": 20, "n_words": 21, "vocab_size": 20 }
code:
    def test_bitbucket2_on_push_commits_multiple_committers_with_others(self) -> None:
        commit_info = "* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n"
        expected_message = f
        self.check_webhook(
            "push_multiple_committers_with_others", TOPIC_BRANCH_EVENTS, expected_message
        )

Record 18
d_id: 117,623 | id: 321,275 | n_whitespaces: 646
path: qutebrowser/mainwindow/tabwidget.py
n_words: 122 | n_identifiers: 47
random_cut:
    def drawControl(self, element, opt, p, widget=None):
        if element not in [QStyle.ControlElement.CE_TabBarTab,
                           QStyle.ControlElement.CE_TabBarTabShape,
                           QStyle.ControlElement.CE_TabBarTabLabel]:
            # Let the real style draw it.
            self._style.drawControl(element, opt, p, widget)
            return

        layouts = self._tab_layout(opt)
        if layouts is None:
            log.misc.warning("Could not get layouts for tab!")
            return

        if element == QStyle.ControlElement.CE_TabBarTab:
            # We override this so we can control TabBarTabShape/TabBarTabLabel.
            self.drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget)
            self.drawControl(QStyle.ControlElement.CE_TabBarTabLabel, opt, p, widget)
        elif element == QStyle.ControlElement.CE_TabBarTabShape:
            p.fillRect(opt.rect, opt.palette.window())
            self._draw_indicator(layouts, opt, p)
            # We use super() rather than self._style here because we don't want
            # any sophisticated drawing.
            super().drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget)
        elif element == QStyle.ControlElement.CE_TabBarTabLabe
commit_message: Run scripts/dev/rewrite_enums.py
fun_name: drawControl
commit_id: 0877fb0d78635692e481c8bde224fac5ad0dd430
repo: qutebrowser
file_name: tabwidget.py
ast_levels: 15 | nloc: 30
url: https://github.com/qutebrowser/qutebrowser.git
complexity: 8 | token_counts: 286 | n_ast_errors: 0 | vocab_size: 86 | n_ast_nodes: 435
language: Python
documentation: { "docstring": "Override drawControl to draw odd tabs in a different color.\n\n Draws the given element with the provided painter with the style\n options specified by option.\n\n Args:\n element: ControlElement\n opt: QStyleOption\n p: QPainter\n widget: QWidget\n ", "language": "en", "n_whitespaces": 106, "n_words": 34, "vocab_size": 31 }
code:
    def drawControl(self, element, opt, p, widget=None):
        if element not in [QStyle.ControlElement.CE_TabBarTab,
                           QStyle.ControlElement.CE_TabBarTabShape,
                           QStyle.ControlElement.CE_TabBarTabLabel]:
            # Let the real style draw it.
            self._style.drawControl(element, opt, p, widget)
            return

        layouts = self._tab_layout(opt)
        if layouts is None:
            log.misc.warning("Could not get layouts for tab!")
            return

        if element == QStyle.ControlElement.CE_TabBarTab:
            # We override this so we can control TabBarTabShape/TabBarTabLabel.
            self.drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget)
            self.drawControl(QStyle.ControlElement.CE_TabBarTabLabel, opt, p, widget)
        elif element == QStyle.ControlElement.CE_TabBarTabShape:
            p.fillRect(opt.rect, opt.palette.window())
            self._draw_indicator(layouts, opt, p)
            # We use super() rather than self._style here because we don't want
            # any sophisticated drawing.
            super().drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget)
        elif element == QStyle.ControlElement.CE_TabBarTabLabel:
            if not opt.icon.isNull() and layouts.icon.isValid():
                self._draw_icon(layouts, opt, p)
            alignment = (config.cache['tabs.title.alignment'] |
                         Qt.AlignmentFlag.AlignVCenter | Qt.TextFlag.TextHideMnemonic)
            self._style.drawItemText(p,
                                    layouts.text,
                                    int(alignment),
                                    opt.palette,
                                    bool(opt.state & QStyle.StateFlag.State_Enabled),
                                    opt.text,
                                    QPalette.ColorRole.WindowText)
        else:
            raise ValueError("Invalid element {!r}".format(element))

Record 19
d_id: 4,685 | id: 24,033 | n_whitespaces: 110
path: ppocr/modeling/heads/rec_abinet_head.py
n_words: 48 | n_identifiers: 22
random_cut:
    def _get_mask(length, max_length):
        length = length.unsqueeze(-1)
        B = paddle.shape(length)[0]
commit_message: [New Rec] add vitstr and ABINet
fun_name: _get_mask
commit_id: c503dc2f9352272615dc3cc11737b833036c6ccc
repo: PaddleOCR
file_name: rec_abinet_head.py
ast_levels: 12 | nloc: 14
url: https://github.com/PaddlePaddle/PaddleOCR.git
complexity: 1 | token_counts: 148 | n_ast_errors: 0 | vocab_size: 35 | n_ast_nodes: 230
language: Python
documentation: { "docstring": "Generate a square mask for the sequence. The masked positions are filled with float('-inf').\n Unmasked positions are filled with float(0.0).\n ", "language": "en", "n_whitespaces": 30, "n_words": 20, "vocab_size": 16 }
code:
    def _get_mask(length, max_length):
        length = length.unsqueeze(-1)
        B = paddle.shape(length)[0]
        grid = paddle.arange(0, max_length).unsqueeze(0).tile([B, 1])
        zero_mask = paddle.zeros([B, max_length], dtype='float32')
        inf_mask = paddle.full([B, max_length], '-inf', dtype='float32')
        diag_mask = paddle.diag(
            paddle.full(
                [max_length], '-inf', dtype=paddle.float32),
            offset=0,
            name=None)
        mask = paddle.where(grid >= length, inf_mask, zero_mask)
        mask = mask.unsqueeze(1) + diag_mask
        return mask.unsqueeze(1)
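A hedged usage sketch for Record 19's `_get_mask`, assuming a working PaddlePaddle install and the function above in scope: for a batch of sequence lengths it builds an additive attention mask of shape [B, 1, max_length, max_length], with 0 at visible positions and -inf both at padded positions and on the diagonal (each position is blocked from attending to itself).

    import paddle

    lengths = paddle.to_tensor([2, 3], dtype='int64')  # B = 2 sequences
    mask = _get_mask(lengths, max_length=4)
    print(mask.shape)  # expected: [2, 1, 4, 4]
    # Row i of mask[b, 0] is added to attention logits: positions >= lengths[b]
    # and position i itself carry -inf, everything else carries 0.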

Record 20
d_id: 26,358 | id: 118,683 | n_whitespaces: 204
path: lib/tests/streamlit/config_test.py
n_words: 52 | n_identifiers: 18
random_cut:
    def test_config_options_removed_on_reparse(self):
        global_config_path = "/mock/home/folder/.streamlit/config.toml"

        makedirs_patch = pat
commit_message:
    Report sharing removal (#4260)
    The report sharing feature is a substantial but completely unused portion of the code in Streamlit's underlying machinery. The feature was created early on, used by just a few groups, and has not been used by anyone for a while, as indicated by no activity in the associated S3 buckets. This commit removes that code to make the remaining code easier to navigate and understand.
fun_name: test_config_options_removed_on_reparse
commit_id: dd9084523e365e637443ea351eaaaa25f52d8412
repo: streamlit
file_name: config_test.py
ast_levels: 12 | nloc: 25
url: https://github.com/streamlit/streamlit.git
complexity: 1 | token_counts: 147 | n_ast_errors: 0 | vocab_size: 32 | n_ast_nodes: 268
language: Python
documentation: { "docstring": "Test that config options that are removed in a file are also removed\n from our _config_options dict.\n [theme]\n base = \"dark\"\n font = \"sans serif\"\n \n [theme]\n base = \"dark\"\n ", "language": "en", "n_whitespaces": 86, "n_words": 29, "vocab_size": 21 }
code:
    def test_config_options_removed_on_reparse(self):
        global_config_path = "/mock/home/folder/.streamlit/config.toml"

        makedirs_patch = patch("streamlit.config.os.makedirs")
        makedirs_patch.return_value = True
        pathexists_patch = patch("streamlit.config.os.path.exists")
        pathexists_patch.side_effect = lambda path: path == global_config_path

        global_config =
        open_patch = patch("streamlit.config.open", mock_open(read_data=global_config))

        with open_patch, makedirs_patch, pathexists_patch:
            config.get_config_options()
            self.assertEqual("dark", config.get_option("theme.base"))
            self.assertEqual("sans serif", config.get_option("theme.font"))

        global_config =
        open_patch = patch("streamlit.config.open", mock_open(read_data=global_config))

        with open_patch, makedirs_patch, pathexists_patch:
            config.get_config_options(force_reparse=True)
            self.assertEqual("dark", config.get_option("theme.base"))
            self.assertEqual(None, config.get_option("theme.font"))

Record 21
d_id: 39,411 | id: 163,265 | n_whitespaces: 511
path: pandas/core/indexes/base.py
n_words: 178 | n_identifiers: 27
random_cut:
    def __getitem__(self, key):
        getitem = self._data.__getitem__

        if is_integer(key) or is_float(key):
            # GH#44051 exclude bool, which would return a 2d ndarray
            key = com.cast_scalar_indexer(key, warn_float=True)
            return getitem(key)

        if isinstance(key, slice):
            # This case is separated from the conditional above to avoid
            # pessimization com.is_bool_indexer and ndim checks.
            result = getitem(key)
            # Going through simple_new for performance.
            return type(self)._simple_new(result, name=self._name)

        if com.is_bool_indexer(key):
            # if we have list[bools, length=1e5] then doing this check+convert
            # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__
            # time below from 3.8 ms to 496 µs
            # if we already have ndarray[bool], the overhead is 1.4 µs or .25%
            key = np.asarray(key, dtype=bool)

        result = getitem(key)
        # Because we ruled out integer above, we always get an arraylike here
        if result.ndim > 1:
            deprecate_ndim_indexing(result)
            if hasattr(result, "_ndarray"):
                # error: Item "ndarray[Any, Any]" of "Union[ExtensionArray,
                # ndarray[Any, Any]]" has no attribute "_ndarray"  [union-attr]
                # i.e. NDArrayBackedExtensionArray
                # Unpack to ndarray for MPL compat
                return result._ndarray  # type: ignore[union-attr]
            return result

        # NB: Using
commit_message: TYP: Ignore numpy related issues (#45244)
fun_name: __getitem__
commit_id: d603d43df2057ecdf74010d9dadc735e37f8f7b5
repo: pandas
file_name: base.py
ast_levels: 11 | nloc: 17
url: https://github.com/pandas-dev/pandas.git
complexity: 7 | token_counts: 139 | n_ast_errors: 0 | vocab_size: 123 | n_ast_nodes: 236
language: Python
documentation: { "docstring": "\n Override numpy.ndarray's __getitem__ method to work as desired.\n\n This function adds lists and Series as valid boolean indexers\n (ndarrays only supports ndarray with dtype=bool).\n\n If resulting ndim != 1, plain ndarray is returned instead of\n corresponding `Index` subclass.\n\n ", "language": "en", "n_whitespaces": 81, "n_words": 38, "vocab_size": 36 }
code:
    def __getitem__(self, key):
        getitem = self._data.__getitem__

        if is_integer(key) or is_float(key):
            # GH#44051 exclude bool, which would return a 2d ndarray
            key = com.cast_scalar_indexer(key, warn_float=True)
            return getitem(key)

        if isinstance(key, slice):
            # This case is separated from the conditional above to avoid
            # pessimization com.is_bool_indexer and ndim checks.
            result = getitem(key)
            # Going through simple_new for performance.
            return type(self)._simple_new(result, name=self._name)

        if com.is_bool_indexer(key):
            # if we have list[bools, length=1e5] then doing this check+convert
            # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__
            # time below from 3.8 ms to 496 µs
            # if we already have ndarray[bool], the overhead is 1.4 µs or .25%
            key = np.asarray(key, dtype=bool)

        result = getitem(key)
        # Because we ruled out integer above, we always get an arraylike here
        if result.ndim > 1:
            deprecate_ndim_indexing(result)
            if hasattr(result, "_ndarray"):
                # error: Item "ndarray[Any, Any]" of "Union[ExtensionArray,
                # ndarray[Any, Any]]" has no attribute "_ndarray"  [union-attr]
                # i.e. NDArrayBackedExtensionArray
                # Unpack to ndarray for MPL compat
                return result._ndarray  # type: ignore[union-attr]
            return result

        # NB: Using _constructor._simple_new would break if MultiIndex
        #  didn't override __getitem__
        return self._constructor._simple_new(result, name=self._name)

Record 22
d_id: 14,521 | id: 67,430 | n_whitespaces: 5
path: erpnext/selling/report/sales_order_analysis/sales_order_analysis.py
n_words: 14 | n_identifiers: 9
random_cut:
    def get_data(conditions, filters):
        data = frappe.db.sql(
            .format(
                conditions=conditions
            ),
            filters,
commit_message: style: format code with black
fun_name: get_data
commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b
repo: erpnext
file_name: sales_order_analysis.py
ast_levels: 11 | nloc: 44
url: https://github.com/frappe/erpnext.git
complexity: 1 | token_counts: 33 | n_ast_errors: 0 | vocab_size: 13 | n_ast_nodes: 51
language: Python
documentation: { "docstring": "\n\t\tSELECT\n\t\t\tso.transaction_date as date,\n\t\t\tsoi.delivery_date as delivery_date,\n\t\t\tso.name as sales_order,\n\t\t\tso.status, so.customer, soi.item_code,\n\t\t\tDATEDIFF(CURDATE(), soi.delivery_date) as delay_days,\n\t\t\tIF(so.status in ('Completed','To Bill'), 0, (SELECT delay_days)) as delay,\n\t\t\tsoi.qty, soi.delivered_qty,\n\t\t\t(soi.qty - soi.delivered_qty) AS pending_qty,\n\t\t\tIF((SELECT pending_qty) = 0, (TO_SECONDS(Max(dn.posting_date))-TO_SECONDS(so.transaction_date)), 0) as time_taken_to_deliver,\n\t\t\tIFNULL(SUM(sii.qty), 0) as billed_qty,\n\t\t\tsoi.base_amount as amount,\n\t\t\t(soi.delivered_qty * soi.base_rate) as delivered_qty_amount,\n\t\t\t(soi.billed_amt * IFNULL(so.conversion_rate, 1)) as billed_amount,\n\t\t\t(soi.base_amount - (soi.billed_amt * IFNULL(so.conversion_rate, 1))) as pending_amount,\n\t\t\tsoi.warehouse as warehouse,\n\t\t\tso.company, soi.name,\n\t\t\tsoi.description as description\n\t\tFROM\n\t\t\t`tabSales Order` so,\n\t\t\t(`tabSales Order Item` soi\n\t\tLEFT JOIN `tabSales Invoice Item` sii\n\t\t\tON sii.so_detail = soi.name and sii.docstatus = 1)\n\t\tLEFT JOIN `tabDelivery Note Item` dni\n\t\t\ton dni.so_detail = soi.name\n\t\tRIGHT JOIN `tabDelivery Note` dn\n\t\t\ton dni.parent = dn.name and dn.docstatus = 1\n\t\tWHERE\n\t\t\tsoi.parent = so.name\n\t\t\tand so.status not in ('Stopped', 'Closed', 'On Hold')\n\t\t\tand so.docstatus = 1\n\t\t\t{conditions}\n\t\tGROUP BY soi.name\n\t\tORDER BY so.transaction_date ASC, soi.item_code ASC\n\t", "language": "en", "n_whitespaces": 112, "n_words": 146, "vocab_size": 102 }
code:
    def get_data(conditions, filters):
        data = frappe.db.sql(
            .format(
                conditions=conditions
            ),
            filters,
            as_dict=1,
        )
        return data

Record 23
d_id: 55,277 | id: 218,391 | n_whitespaces: 140
path: python3.10.4/Lib/inspect.py
n_words: 40 | n_identifiers: 16
random_cut:
    def getcoroutinelocals(coroutine):
        frame = getattr(coroutine, "cr_frame", None)
        if frame is not None:
            return frame.f_locals
        else:
            return {}


    ###############################################################################
    ### Function Signature Object (PEP 362)
    ###################################################
commit_message: add python 3.10.4 for windows
fun_name: getcoroutinelocals
commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526
repo: XX-Net
file_name: inspect.py
ast_levels: 9 | nloc: 6
url: https://github.com/XX-net/XX-Net.git
complexity: 2 | token_counts: 31 | n_ast_errors: 0 | vocab_size: 33 | n_ast_nodes: 118
language: Python
documentation: { "docstring": "\n Get the mapping of coroutine local variables to their current values.\n\n A dict is returned, with the keys the local variable names and values the\n bound values.", "language": "en", "n_whitespaces": 36, "n_words": 27, "vocab_size": 22 }
code:
    def getcoroutinelocals(coroutine):
        frame = getattr(coroutine, "cr_frame", None)
        if frame is not None:
            return frame.f_locals
        else:
            return {}


    ###############################################################################
    ### Function Signature Object (PEP 362)
    ###############################################################################


    _WrapperDescriptor = type(type.__call__)
    _MethodWrapper = type(all.__call__)
    _ClassMethodWrapper = type(int.__dict__['from_bytes'])

    _NonUserDefinedCallables = (_WrapperDescriptor,
                               _MethodWrapper,
                               _ClassMethodWrapper,
                               types.BuiltinFunctionType)
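A small usage sketch of the standard-library `inspect.getcoroutinelocals` from Record 23: before a coroutine first runs, its frame exists and the locals are just the bound arguments; once it finishes or is closed, `cr_frame` is None and an empty dict comes back.

    import inspect

    async def greet(name):
        message = f"hello {name}"
        return message

    coro = greet("world")
    print(inspect.getcoroutinelocals(coro))  # {'name': 'world'} -- not started yet
    coro.close()                            # closing drops the frame
    print(inspect.getcoroutinelocals(coro))  # {}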

Record 24
d_id: 80,975 | id: 272,189 | n_whitespaces: 51
path: keras/integration_test/forwardprop_test.py
n_words: 14 | n_identifiers: 9
random_cut:
    def _forward_over_back_hessian(f, params, use_pfor, dtype=None):
        return _vectorize_parameters(
            functools.partial(_hvp, f, params),
            params,
            use_pfor=use_pfor,
            dtype=dtype,
commit_message:
    Reformatting the codebase with black.
    PiperOrigin-RevId: 450093126
fun_name: _forward_over_back_hessian
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
repo: keras
file_name: forwardprop_test.py
ast_levels: 9 | nloc: 7
url: https://github.com/keras-team/keras.git
complexity: 1 | token_counts: 39 | n_ast_errors: 0 | vocab_size: 13 | n_ast_nodes: 55
language: Python
documentation: { "docstring": "Computes the full Hessian matrix for the scalar-valued f(*params).\n\n Args:\n f: A function taking `params` and returning a scalar.\n params: A possibly nested structure of tensors.\n use_pfor: If true, uses `tf.vectorized_map` calls instead of looping.\n dtype: Required if `use_pfor=False`. A possibly nested structure of dtypes\n (e.g. `tf.float32`) matching the structure of `f`'s returns.\n\n Returns:\n A possibly nested structure of matrix slices corresponding to `params`. Each\n slice has shape [P, p_s] where `p_s` is the number of parameters (`tf.size`)\n in the corresponding element of `params` and `P` is the total number of\n parameters (`sum_s(p_s)`). The full matrix can be obtained by concatenating\n along the second axis.\n ", "language": "en", "n_whitespaces": 166, "n_words": 105, "vocab_size": 73 }
code:
    def _forward_over_back_hessian(f, params, use_pfor, dtype=None):
        return _vectorize_parameters(
            functools.partial(_hvp, f, params),
            params,
            use_pfor=use_pfor,
            dtype=dtype,
        )

Record 25
d_id: 75,837 | id: 259,605 | n_whitespaces: 603
path: sklearn/linear_model/_stochastic_gradient.py
n_words: 125 | n_identifiers: 24
random_cut:
    def predict_proba(self, X):
        check_is_fitted(self)
        # TODO(1.3): Remove "log"
        if self.loss in ("log_loss", "log"):
            return self._predict_proba_lr(X)

        elif self.loss == "modified_huber":
            binary = len(self.classes_) == 2
            scores = self.decision_function(X)

            if binary:
                prob2 = np.ones((scores.shape[0], 2))
                prob = prob2[:, 1]
            else:
                prob = scores

            np.clip(scores, -1, 1, prob)
            prob += 1.0
            prob /= 2.0

            if binary:
                prob2[:, 0] -= prob
                prob = prob2
            else:
                # the above might assign zero to all classes, which doesn't
                # normalize neatly; work around this to produce uniform
                # probabilities
                prob_sum = prob.sum(axis=1)
                all_zero = prob_sum == 0
                if np.any(all_zero):
                    prob[all_zero, :] = 1
                    prob_sum[all_zero] = len(self.classes_)

                # normalize
                prob /= p
commit_message:
    DEP loss "log" in favor of "log loss" in SGDClassifier (#23046)
    Co-authored-by: Julien Jerphanion <git@jjerphan.xyz>
    Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
fun_name: predict_proba
commit_id: 0c20ba744966d23ede67cffd7c5d2e0d01cd0658
repo: scikit-learn
file_name: _stochastic_gradient.py
ast_levels: 17 | nloc: 33
url: https://github.com/scikit-learn/scikit-learn.git
complexity: 6 | token_counts: 204 | n_ast_errors: 0 | vocab_size: 85 | n_ast_nodes: 328
language: Python
documentation: { "docstring": "Probability estimates.\n\n This method is only available for log loss and modified Huber loss.\n\n Multiclass probability estimates are derived from binary (one-vs.-rest)\n estimates by simple normalization, as recommended by Zadrozny and\n Elkan.\n\n Binary probability estimates for loss=\"modified_huber\" are given by\n (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions\n it is necessary to perform proper probability calibration by wrapping\n the classifier with\n :class:`~sklearn.calibration.CalibratedClassifierCV` instead.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Input data for prediction.\n\n Returns\n -------\n ndarray of shape (n_samples, n_classes)\n Returns the probability of the sample for each class in the model,\n where classes are ordered as they are in `self.classes_`.\n\n References\n ----------\n Zadrozny and Elkan, \"Transforming classifier scores into multiclass\n probability estimates\", SIGKDD'02,\n https://dl.acm.org/doi/pdf/10.1145/775047.775151\n\n The justification for the formula in the loss=\"modified_huber\"\n case is in the appendix B in:\n http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf\n ", "language": "en", "n_whitespaces": 339, "n_words": 138, "vocab_size": 98 }
code:
    def predict_proba(self, X):
        check_is_fitted(self)
        # TODO(1.3): Remove "log"
        if self.loss in ("log_loss", "log"):
            return self._predict_proba_lr(X)

        elif self.loss == "modified_huber":
            binary = len(self.classes_) == 2
            scores = self.decision_function(X)

            if binary:
                prob2 = np.ones((scores.shape[0], 2))
                prob = prob2[:, 1]
            else:
                prob = scores

            np.clip(scores, -1, 1, prob)
            prob += 1.0
            prob /= 2.0

            if binary:
                prob2[:, 0] -= prob
                prob = prob2
            else:
                # the above might assign zero to all classes, which doesn't
                # normalize neatly; work around this to produce uniform
                # probabilities
                prob_sum = prob.sum(axis=1)
                all_zero = prob_sum == 0
                if np.any(all_zero):
                    prob[all_zero, :] = 1
                    prob_sum[all_zero] = len(self.classes_)

                # normalize
                prob /= prob_sum.reshape((prob.shape[0], -1))

            return prob

        else:
            raise NotImplementedError(
                "predict_(log_)proba only supported when"
                " loss='log_loss' or loss='modified_huber' "
                "(%r given)" % self.loss
            )
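A hedged usage sketch for Record 25, assuming scikit-learn is installed: probability estimates from `SGDClassifier` are only defined for the two losses this method handles, so `modified_huber` works while `hinge` does not.

    import numpy as np
    from sklearn.linear_model import SGDClassifier

    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])

    clf = SGDClassifier(loss="modified_huber", random_state=0).fit(X, y)
    print(clf.predict_proba(X).shape)  # (4, 2); in the binary case rows sum to 1

    clf = SGDClassifier(loss="hinge", random_state=0).fit(X, y)
    # Accessing clf.predict_proba here raises, since hinge loss has no
    # probability estimates without external calibration.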

Record 26
d_id: 5,027 | id: 26,573 | n_whitespaces: 216
path: saleor/plugins/openid_connect/utils.py
n_words: 86 | n_identifiers: 25
random_cut:
    def fetch_jwks(jwks_url) -> Optional[dict]:
        response = None
        try:
            response = requests.get(jwks_url, timeout=REQUEST_TIMEOUT)
            response.raise_for_status()
            jwks = response.json()
        except requests.exceptions.RequestException:
            logger.exception("Unable to fetch jwks from %s", jwks_url)
            raise AuthenticationError("Unable to finalize the authentication process.")
        except json.JSONDecodeError:
            content = response.content if response else "Unable to find the response"
            logger.exception(
                "Unable to decode the response from auth service with jwks. "
                "Response: %s",
                content,
            )
            raise AuthenticationError("Unable to finalize the authentication process.")
        keys = jwks.get("keys", [])
        if
commit_message:
    Make OIDC plugin public (#9406)
    * Make OIDC plugin public
    * Add missing dependency package
    * Apply changes after review
    * Update changelog
    * Apply changes after review
    * Add const file
fun_name: fetch_jwks
commit_id: 7d2e77c5f235ca60a2bf3ee02f4f9a8b10b03214
repo: saleor
file_name: utils.py
ast_levels: 12 | nloc: 28
url: https://github.com/saleor/saleor.git
complexity: 5 | token_counts: 122 | n_ast_errors: 0 | vocab_size: 59 | n_ast_nodes: 210
language: Python
documentation: { "docstring": "Fetch JSON Web Key Sets from a provider.\n\n Fetched keys will be stored in the cache to the reduced amount of possible\n requests.\n :raises AuthenticationError\n ", "language": "en", "n_whitespaces": 37, "n_words": 25, "vocab_size": 24 }
code:
    def fetch_jwks(jwks_url) -> Optional[dict]:
        response = None
        try:
            response = requests.get(jwks_url, timeout=REQUEST_TIMEOUT)
            response.raise_for_status()
            jwks = response.json()
        except requests.exceptions.RequestException:
            logger.exception("Unable to fetch jwks from %s", jwks_url)
            raise AuthenticationError("Unable to finalize the authentication process.")
        except json.JSONDecodeError:
            content = response.content if response else "Unable to find the response"
            logger.exception(
                "Unable to decode the response from auth service with jwks. "
                "Response: %s",
                content,
            )
            raise AuthenticationError("Unable to finalize the authentication process.")
        keys = jwks.get("keys", [])
        if not keys:
            logger.warning("List of JWKS keys is empty")
        cache.set(JWKS_KEY, keys, JWKS_CACHE_TIME)
        return keys