diff --git "a/summarization_dataset_3k_deduped.json" "b/summarization_dataset_3k_deduped.json" new file mode 100644--- /dev/null +++ "b/summarization_dataset_3k_deduped.json" @@ -0,0 +1,90646 @@ +[ + { + "id": 281136, + "commit_id": "ea964109d654394cc0a5237e6ec5510ba6404097", + "repo": "OpenBBTerminal", + "path": "gamestonk_terminal/cryptocurrency/due_diligence/dd_controller.py", + "file_name": "dd_controller.py", + "fun_name": "call_bc", + "commit_message": "Crypto menu refactor (#1119)\n\n* enabled some crypto commands in dd to be called independent of source loaded\r\n\r\n* support for coin_map_df in all dd functions + load ta and plot chart refactor\r\n\r\n* updated tests and removed coingecko scrapping where possible\r\n\r\n* removed ref of command from hugo\r\n\r\n* updated pycoingecko version\r\n\r\n* refactoring load\r\n\r\n* refactored load to fetch prices; pred can run independent of source now\r\n\r\n* load by default usd on cp/cg and usdt on cb/bin\r\n\r\n* updated to rich for formatting and updated dependencies\r\n\r\n* fixed changes requested\r\n\r\n* update docs\r\n\r\n* revert discord requirements\r\n\r\n* removed absolute from calculate change for price\r\n\r\n* fixing pr issues\r\n\r\n* fix loading issue when similar coins exist, move coins to home, fill n/a\r\n\r\n* update docs for coins\r\n\r\n* adds load to ta and pred menu", + "code": "def call_bc(self, other_args):\n \n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"bc\",\n description=,\n )\n ns_parser = parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n\n if ns_parser:\n pycoingecko_view.display_bc(self.coin_map_df[\"CoinGecko\"], ns_parser.export)\n", + "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 130, + "n_words": 22, + "vocab_size": 20, + "complexity": 2, + "nloc": 15, + "token_counts": 61, + "n_ast_nodes": 97, + "n_identifiers": 18, + "d_id": 83548, + "documentation": { + "docstring": "Process bc command\n Blockchain explorers URLs for loaded coin. Those are sites like etherescan.io or polkascan.io\n in which you can see all blockchain data e.g. 
all txs, all tokens, all contracts...\n ", + "n_words": 31, + "vocab_size": 28, + "n_whitespaces": 84, + "language": "en" + } + }, + { + "id": 30126, + "commit_id": "fa2ad657482aca9dc628e6d7062b8badf2706bb6", + "repo": "spotify-downloader", + "path": "tests/conftest.py", + "file_name": "conftest.py", + "fun_name": "communicate", + "commit_message": "v4 init", + "code": "async def communicate(self):\n \n assert self._input.is_file()\n self._output.open(\"w\").close()\n return (None, None)\n", + "url": "https://github.com/spotDL/spotify-downloader.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 37, + "n_words": 9, + "vocab_size": 9, + "complexity": 1, + "nloc": 4, + "token_counts": 32, + "n_ast_nodes": 57, + "n_identifiers": 7, + "d_id": 5330, + "documentation": { + "docstring": "\n Ensure that the file has been download, and create empty output file,\n to avoid infinite loop.\n ", + "n_words": 16, + "vocab_size": 16, + "n_whitespaces": 38, + "language": "en" + } + }, + { + "id": 138092, + "commit_id": "1510fb2cd631b2776092fb45ee4082e5e65f16f8", + "repo": "ray", + "path": "python/ray/tune/tests/test_actor_reuse.py", + "file_name": "test_actor_reuse.py", + "fun_name": "test_multi_trial_reuse_with_failing", + "commit_message": "[air/tune] Internal resource management 2 - Ray Tune to use new Ray AIR resource manager (#30016)\n\nIncludes/depends on #30777\r\n\r\nTLDR: This PR refactors Ray Tune's resource management to use a central AIR resource management package instead of the tightly coupled PlacementGroupManager.\r\n\r\nRay Tune's resource management currently uses a tightly coupled placement group manager. This leads to a number of shortcomings:\r\n- The tight coupling on the manager side (e.g. PG manager keeps track of trials) prevents re-usability\r\n- The tight coupling on the trial executor side prevents using different resource management strategies (e.g. shared or budget-based)\r\n- It's hard to test independently. Tests for the resource management require a simulated tune setup.\r\n\r\nTo improve stability, extensibility, and maintainability, this PR moves the resource management logic into a central `ray.air.execution.resources` subpackage. The resource management has a simple API that works with `ResourceRequest`s and `AllocatedResources` to manage requested and assigned resources, respectively. 
The actual resource management can then be anything - per default it is a placement group based manager, but this PR also introduces a PoC budget-based manager that can be plugged in.\r\n\r\nThe PR does not substantially change existing tests, so we can be certain that the new resource model is a fully compatible replacement for the old placement group manager.\r\n\r\nSigned-off-by: Kai Fricke ", + "code": "def test_multi_trial_reuse_with_failing(ray_start_4_cpus_extra):\n \n os.environ[\"TUNE_MAX_PENDING_TRIALS_PG\"] = \"2\"\n\n register_trainable(\"foo2\", MyResettableClass)\n\n [trial1, trial2, trial3, trial4] = tune.run(\n \"foo2\",\n config={\n \"fail\": tune.grid_search([False, True, False, False]),\n \"id\": -1,\n \"sleep\": 2,\n },\n reuse_actors=True,\n resources_per_trial={\"cpu\": 2},\n raise_on_failed_trial=False,\n ).trials\n\n assert trial1.last_result[\"num_resets\"] == 0\n assert trial3.last_result[\"num_resets\"] == 0\n assert trial4.last_result[\"num_resets\"] == 1\n\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 141, + "n_words": 42, + "vocab_size": 36, + "complexity": 1, + "nloc": 17, + "token_counts": 113, + "n_ast_nodes": 183, + "n_identifiers": 19, + "d_id": 31314, + "documentation": { + "docstring": "Test that failing trial's actors are not reused.\n\n - 2 trials can run at the same time\n - Trial 1 succeeds, trial 2 fails\n - Trial 3 will be scheduled after trial 2 failed, so won't reuse actor\n - Trial 4 will be scheduled after trial 1 succeeded, so will reuse actor\n ", + "n_words": 52, + "vocab_size": 34, + "n_whitespaces": 67, + "language": "en" + } + }, + { + "id": 145482, + "commit_id": "b5b4460932505912d88d65134931e0da170fb467", + "repo": "ray", + "path": "python/ray/data/impl/block_list.py", + "file_name": "block_list.py", + "fun_name": "size_bytes", + "commit_message": "Support creating a DatasetPipeline windowed by bytes (#22577)", + "code": "def size_bytes(self) -> int:\n \n size = 0\n has_size = False\n for m in self.get_metadata():\n if m.size_bytes is not None:\n has_size = True\n size += m.size_bytes\n if not has_size:\n return -1\n else:\n return size\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 138, + "n_words": 33, + "vocab_size": 24, + "complexity": 4, + "nloc": 12, + "token_counts": 50, + "n_ast_nodes": 84, + "n_identifiers": 7, + "d_id": 33465, + "documentation": { + "docstring": "Returns the total size in bytes of the blocks, or -1 if not known.", + "n_words": 14, + "vocab_size": 13, + "n_whitespaces": 13, + "language": "en" + } + }, + { + "id": 108335, + "commit_id": "0abe0ce2f2748d1d0383154d045da3609a4b871b", + "repo": "matplotlib", + "path": "lib/matplotlib/colors.py", + "file_name": "colors.py", + "fun_name": "unregister", + "commit_message": "Add a registry for color sequences\n\nColor sequences are simply lists of colors, that we store by name in\na registry. 
The registry is modelled similar to the ColormapRegistry\nto 1) support immutable builtin color sequences and 2) to return copies\nso that one cannot mess with the global definition of the color sequence\nthrough an obtained instance.\n\nFor now, I've made the sequences used for `ListedColormap`s available\nas builtin sequences, but that's open for discussion.\n\nMore usage documentation should be added in the color examples and/or\ntutorials, but I'll wait with that till after the general approval of\nthe structure and API. One common use case will be\n\n```\nplt.rc_params['axes.prop_cycle'] = plt.cycler(color=plt.color_sequences['Pastel1')\n```\n\nCo-authored-by: Elliott Sales de Andrade ", + "code": "def unregister(self, name):\n \n if name in self._BUILTIN_COLOR_SEQUENCES:\n raise ValueError(\n f\"Cannot unregister builtin color sequence {name!r}\")\n self._color_sequences.pop(name, None)\n\n\n_color_sequences = ColorSequenceRegistry()\n\n", + "url": "https://github.com/matplotlib/matplotlib.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 66, + "n_words": 20, + "vocab_size": 20, + "complexity": 2, + "nloc": 5, + "token_counts": 31, + "n_ast_nodes": 65, + "n_identifiers": 8, + "d_id": 23145, + "documentation": { + "docstring": "\n Remove a sequence from the registry.\n\n You cannot remove built-in color sequences.\n\n If the name is not registered, returns with no error.\n ", + "n_words": 22, + "vocab_size": 21, + "n_whitespaces": 51, + "language": "en" + } + }, + { + "id": 218114, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/importlib/_bootstrap_external.py", + "file_name": "_bootstrap_external.py", + "fun_name": "source_from_cache", + "commit_message": "add python 3.10.4 for windows", + "code": "def source_from_cache(path):\n \n if sys.implementation.cache_tag is None:\n raise NotImplementedError('sys.implementation.cache_tag is None')\n path = _os.fspath(path)\n head, pycache_filename = _path_split(path)\n found_in_pycache_prefix = False\n if sys.pycache_prefix is not None:\n stripped_path = sys.pycache_prefix.rstrip(path_separators)\n if head.startswith(stripped_path + path_sep):\n head = head[len(stripped_path):]\n found_in_pycache_prefix = True\n if not found_in_pycache_prefix:\n head, pycache = _path_split(head)\n if pycache != _PYCACHE:\n raise ValueError(f'{_PYCACHE} not bottom-level directory in '\n f'{path!r}')\n dot_count = pycache_filename.count('.')\n if dot_count not in {2, 3}:\n raise ValueError(f'expected only 2 or 3 dots in {pycache_filename!r}')\n elif dot_count == 3:\n optimization = pycache_filename.rsplit('.', 2)[-2]\n if not optimization.startswith(_OPT):\n raise ValueError(\"optimization portion of filename does not start \"\n f\"with {_OPT!r}\")\n opt_level = optimization[len(_OPT):]\n if not opt_level.isalnum():\n raise ValueError(f\"optimization level {optimization!r} is not an \"\n \"alphanumeric value\")\n base_filename = pycache_filename.partition('.')[0]\n return _path_join(head, base_filename + SOURCE_SUFFIXES[0])\n\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 366, + "n_words": 121, + "vocab_size": 79, + "complexity": 10, + "nloc": 30, + "token_counts": 212, + "n_ast_nodes": 378, + "n_identifiers": 33, + "d_id": 55140, + "documentation": { + "docstring": "Given the path to a .pyc. 
file, return the path to its .py file.\n\n The .pyc file does not need to exist; this simply returns the path to\n the .py file calculated to correspond to the .pyc file. If path does\n not conform to PEP 3147/488 format, ValueError will be raised. If\n sys.implementation.cache_tag is None then NotImplementedError is raised.\n\n ", + "n_words": 59, + "vocab_size": 37, + "n_whitespaces": 75, + "language": "en" + } + }, + { + "id": 47665, + "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", + "repo": "airflow", + "path": "tests/serialization/test_dag_serialization.py", + "file_name": "test_dag_serialization.py", + "fun_name": "test_deps_sorted", + "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", + "code": "def test_deps_sorted(self):\n \n from airflow.operators.empty import EmptyOperator\n from airflow.sensors.external_task import ExternalTaskSensor\n\n execution_date = datetime(2020, 1, 1)\n with DAG(dag_id=\"test_deps_sorted\", start_date=execution_date) as dag:\n task1 = ExternalTaskSensor(\n task_id=\"task1\",\n external_dag_id=\"external_dag_id\",\n mode=\"reschedule\",\n )\n task2 = EmptyOperator(task_id=\"task2\")\n task1 >> task2\n\n serialize_op = SerializedBaseOperator.serialize_operator(dag.task_dict[\"task1\"])\n deps = serialize_op[\"deps\"]\n assert deps == [\n 'airflow.ti_deps.deps.not_in_retry_period_dep.NotInRetryPeriodDep',\n 'airflow.ti_deps.deps.not_previously_skipped_dep.NotPreviouslySkippedDep',\n 'airflow.ti_deps.deps.prev_dagrun_dep.PrevDagrunDep',\n 'airflow.ti_deps.deps.ready_to_reschedule.ReadyToRescheduleDep',\n 'airflow.ti_deps.deps.trigger_rule_dep.TriggerRuleDep',\n ]\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 256, + "n_words": 49, + "vocab_size": 40, + "complexity": 1, + "nloc": 21, + "token_counts": 109, + "n_ast_nodes": 186, + "n_identifiers": 25, + "d_id": 9201, + "documentation": { + "docstring": "\n Tests serialize_operator, make sure the deps is in order\n ", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 24, + "language": "en" + } + }, + { + "id": 140203, + "commit_id": "f67871c1f7e79adc727b2a15311d9332832d2e8a", + "repo": "ray", + "path": "python/ray/workflow/workflow_storage.py", + "file_name": "workflow_storage.py", + "fun_name": "load_workflow_status", + "commit_message": "[workflow] Fast workflow indexing (#24767)\n\n* workflow indexing\r\n\r\n* simplify workflow storage API\r\n\r\n* Only fix workflow status when updating the status.\r\n\r\n* support status filter", + "code": "def load_workflow_status(self):\n \n return self._status_storage.load_workflow_status(self._workflow_id)\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 18, + "n_words": 4, + "vocab_size": 4, + "complexity": 1, + "nloc": 2, + "token_counts": 17, + "n_ast_nodes": 30, + "n_identifiers": 4, + "d_id": 31892, + "documentation": { + "docstring": "Load workflow status. 
If we find the previous status updating failed,\n fix it with redo-log transaction recovery.", + "n_words": 17, + "vocab_size": 17, + "n_whitespaces": 23, + "language": "en" + } + }, + { + "id": 178678, + "commit_id": "c723f658e8c11ec92d6ef90c2f42527c67d3f318", + "repo": "Nuitka", + "path": "nuitka/PythonFlavors.py", + "file_name": "PythonFlavors.py", + "fun_name": "isCPythonOfficialPackage", + "commit_message": "Added CPython Official flavor, so far only detected on macOS", + "code": "def isCPythonOfficialPackage():\n \n\n # For macOS however, it's very knowable.\n if isMacOS() and sys.executable.startswith(\n \"/Library/Frameworks/Python.framework/Versions/\"\n ):\n return True\n\n return False\n", + "url": "https://github.com/Nuitka/Nuitka.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 48, + "n_words": 19, + "vocab_size": 18, + "complexity": 3, + "nloc": 6, + "token_counts": 23, + "n_ast_nodes": 44, + "n_identifiers": 5, + "d_id": 42789, + "documentation": { + "docstring": "Official CPython download, kind of hard to detect since self-compiled doesn't change much.", + "n_words": 13, + "vocab_size": 13, + "n_whitespaces": 12, + "language": "en" + } + }, + { + "id": 86527, + "commit_id": "bc59434031930199dcdc056943c2ba4a17bbd5c8", + "repo": "sentry", + "path": "src/sentry/tasks/post_process.py", + "file_name": "post_process.py", + "fun_name": "update_existing_attachments", + "commit_message": "ref(perf-issues): Modularize post_process_group (ISP-11) (#39594)\n\nFully modularizes `post_process_group` as final step before adding\r\nmultiple event types to it.", + "code": "def update_existing_attachments(job):\n \n # Patch attachments that were ingested on the standalone path.\n with sentry_sdk.start_span(op=\"tasks.post_process_group.update_existing_attachments\"):\n try:\n from sentry.models import EventAttachment\n\n event = job[\"event\"]\n\n EventAttachment.objects.filter(\n project_id=event.project_id, event_id=event.event_id\n ).update(group_id=event.group_id)\n except Exception:\n logger.exception(\"Failed to update existing attachments\")\n\n", + "url": "https://github.com/getsentry/sentry.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 126, + "n_words": 33, + "vocab_size": 33, + "complexity": 2, + "nloc": 10, + "token_counts": 66, + "n_ast_nodes": 116, + "n_identifiers": 18, + "d_id": 18118, + "documentation": { + "docstring": "\n Attaches the group_id to all event attachments that were either:\n\n 1) ingested prior to the event via the standalone attachment endpoint.\n 2) part of a different group before reprocessing started.\n ", + "n_words": 30, + "vocab_size": 26, + "n_whitespaces": 43, + "language": "en" + } + }, + { + "id": 204338, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/contrib/staticfiles/finders.py", + "file_name": "finders.py", + "fun_name": "list", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def list(self, ignore_patterns):\n \n raise NotImplementedError(\n \"subclasses of BaseFinder must provide a list() method\"\n )\n\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 46, + "n_words": 14, + "vocab_size": 14, + "complexity": 1, + "nloc": 4, + "token_counts": 13, + "n_ast_nodes": 25, + "n_identifiers": 4, + "d_id": 50701, + "documentation": { + "docstring": 
"\n Given an optional list of paths to ignore, return a two item iterable\n consisting of the relative path and storage instance.\n ", + "n_words": 21, + "vocab_size": 20, + "n_whitespaces": 43, + "language": "en" + } + }, + { + "id": 3800, + "commit_id": "a3aae8017a0a40ff2006e2567f71dccb04c997a5", + "repo": "airbyte", + "path": "airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_async_job.py", + "file_name": "test_async_job.py", + "fun_name": "test_less_jobs", + "commit_message": "🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805)\n\n* Facebook Marketing performance improvement\r\n\r\n* add comments and little refactoring\r\n\r\n* fix integration tests with the new config\r\n\r\n* improve job status handling, limit concurrency to 10\r\n\r\n* fix campaign jobs, refactor manager\r\n\r\n* big refactoring of async jobs, support random order of slices\r\n\r\n* update source _read_incremental to hook new state logic\r\n\r\n* fix issues with timeout\r\n\r\n* remove debugging and clean up, improve retry logic\r\n\r\n* merge changes from #8234\r\n\r\n* fix call super _read_increment\r\n\r\n* generalize batch execution, add use_batch flag\r\n\r\n* improve coverage, do some refactoring of spec\r\n\r\n* update test, remove overrides of source\r\n\r\n* add split by AdSet\r\n\r\n* add smaller insights\r\n\r\n* fix end_date < start_date case\r\n\r\n* add account_id to PK\r\n\r\n* add notes\r\n\r\n* fix new streams\r\n\r\n* fix reversed incremental stream\r\n\r\n* update spec.json for SAT\r\n\r\n* upgrade CDK and bump version\r\n\r\nCo-authored-by: Dmytro Rezchykov \r\nCo-authored-by: Eugene Kulak ", + "code": "def test_less_jobs(self, api, started_job, batch):\n \n jobs = [started_job for _ in range(49)]\n\n update_in_batch(api=api, jobs=jobs)\n\n assert started_job.update_job.call_count == 49\n assert len(api.new_batch.return_value) == 49\n batch.execute.assert_called_once()\n", + "url": "https://github.com/airbytehq/airbyte.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 65, + "n_words": 23, + "vocab_size": 20, + "complexity": 2, + "nloc": 6, + "token_counts": 60, + "n_ast_nodes": 93, + "n_identifiers": 16, + "d_id": 563, + "documentation": { + "docstring": "Should update all jobs when number of jobs less than max size of batch", + "n_words": 14, + "vocab_size": 12, + "n_whitespaces": 13, + "language": "en" + } + }, + { + "id": 118684, + "commit_id": "dd9084523e365e637443ea351eaaaa25f52d8412", + "repo": "streamlit", + "path": "lib/tests/streamlit/config_test.py", + "file_name": "config_test.py", + "fun_name": "test_load_global_local_flag_config", + "commit_message": "Report sharing removal (#4260)\n\nThe report sharing feature is a substantial but completely unused portion of the code in Streamlit's underlying machinery. The feature was created early on, used by just a few groups, and has not been used by anyone for a while, as indicated by no activity in the associated S3 buckets. 
This commit removes that code to make the remaining code easier to navigate and understand.", + "code": "def test_load_global_local_flag_config(self):\n \n\n global_config = \n\n local_config = \n\n global_config_path = \"/mock/home/folder/.streamlit/config.toml\"\n local_config_path = os.path.join(os.getcwd(), \".streamlit/config.toml\")\n\n global_open = mock_open(read_data=global_config)\n local_open = mock_open(read_data=local_config)\n open = mock_open()\n open.side_effect = [global_open.return_value, local_open.return_value]\n\n open_patch = patch(\"streamlit.config.open\", open)\n # patch streamlit.*.os.* instead of os.* for py35 compat\n makedirs_patch = patch(\"streamlit.config.os.makedirs\")\n makedirs_patch.return_value = True\n pathexists_patch = patch(\"streamlit.config.os.path.exists\")\n pathexists_patch.side_effect = lambda path: path in [\n global_config_path,\n local_config_path,\n ]\n\n with open_patch, makedirs_patch, pathexists_patch:\n config.get_config_options(options_from_flags={\"theme.font\": \"monospace\"})\n\n self.assertEqual(\"light\", config.get_option(\"theme.base\"))\n self.assertEqual(\"#FFFFFF\", config.get_option(\"theme.textColor\"))\n self.assertEqual(\"monospace\", config.get_option(\"theme.font\"))\n", + "url": "https://github.com/streamlit/streamlit.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 257, + "n_words": 70, + "vocab_size": 58, + "complexity": 1, + "nloc": 31, + "token_counts": 163, + "n_ast_nodes": 292, + "n_identifiers": 26, + "d_id": 26359, + "documentation": { + "docstring": "Test that CLI flags have higher priority than both\n ~/.streamlit/config.toml and $CWD/.streamlit/config.toml at parse time.\n \n [theme]\n base = \"dark\"\n font = \"sans serif\"\n textColor = \"#FFFFFF\"\n \n [theme]\n base = \"light\"\n font = \"serif\"\n ", + "n_words": 33, + "vocab_size": 26, + "n_whitespaces": 112, + "language": "en" + } + }, + { + "id": 166376, + "commit_id": "864729813a0203af8bb0d30b6c883588ae2c96f8", + "repo": "pandas", + "path": "pandas/tests/io/test_pickle.py", + "file_name": "test_pickle.py", + "fun_name": "test_pickle_binary_object_compression", + "commit_message": "ENH: add support for reading .tar archives (#44787)\n\n* Add reproduction test for .tar.gz archives\r\n\r\nco-authored-by: Margarete Dippel \r\n\r\n* add support for .tar archives\r\n\r\npython's `tarfile` supports gzip, xz and bz2 encoding,\r\nso we don't need to make any special cases for that.\r\n\r\nco-authored-by: Margarete Dippel \r\n\r\n* update doc comments\r\n\r\n* fix: pep8 errors\r\n\r\n* refactor: flip _compression_to_extension around to support multiple extensions on same compression\r\n\r\nco-authored-by: Margarete Dippel \r\n\r\n* refactor: detect tar files using existing extension mapping\r\n\r\nco-authored-by: Margarete Dippel \r\n\r\n* feat: add support for writing tar files\r\n\r\nco-authored-by: Margarete Dippel \r\n\r\n* feat: assure it respects .gz endings\r\n\r\n* feat: add \"tar\" entry to compressionoptions\r\n\r\n* chore: add whatsnew entry\r\n\r\n* fix: test_compression_size_fh\r\n\r\n* add tarfile to shared compression docs\r\n\r\n* fix formatting\r\n\r\n* pass through \"mode\" via compression args\r\n\r\n* fix pickle test\r\n\r\n* add class comment\r\n\r\n* sort imports\r\n\r\n* add _compression_to_extension back for backwards compatibility\r\n\r\n* fix some type warnings\r\n\r\n* fix: formatting\r\n\r\n* fix: mypy complaints\r\n\r\n* fix: more tests\r\n\r\n* fix: some error with 
xml\r\n\r\n* fix: interpreted text role\r\n\r\n* move to v1.5 whatsnw\r\n\r\n* add versionadded note\r\n\r\n* don't leave blank lines\r\n\r\n* add tests for zero files / multiple files\r\n\r\n* move _compression_to_extension to tests\r\n\r\n* revert added \"mode\" argument\r\n\r\n* add test to ensure that `compression.mode` works\r\n\r\n* compare strings, not bytes\r\n\r\n* replace carriage returns\r\n\r\nCo-authored-by: Margarete Dippel ", + "code": "def test_pickle_binary_object_compression(compression):\n \n df = tm.makeDataFrame()\n\n # reference for compression\n with tm.ensure_clean() as path:\n df.to_pickle(path, compression=compression)\n reference = Path(path).read_bytes()\n\n # write\n buffer = io.BytesIO()\n df.to_pickle(buffer, compression=compression)\n buffer.seek(0)\n\n # gzip and zip safe the filename: cannot compare the compressed content\n assert buffer.getvalue() == reference or compression in (\"gzip\", \"zip\", \"tar\")\n\n # read\n read_df = pd.read_pickle(buffer, compression=compression)\n buffer.seek(0)\n tm.assert_frame_equal(df, read_df)\n\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 114, + "n_words": 57, + "vocab_size": 44, + "complexity": 2, + "nloc": 12, + "token_counts": 109, + "n_ast_nodes": 188, + "n_identifiers": 20, + "d_id": 39808, + "documentation": { + "docstring": "\n Read/write from binary file-objects w/wo compression.\n\n GH 26237, GH 29054, and GH 29570\n ", + "n_words": 13, + "vocab_size": 11, + "n_whitespaces": 23, + "language": "en" + } + }, + { + "id": 70980, + "commit_id": "de3fcba9e95818e9634ab7de6bfcb1f4221f2775", + "repo": "wagtail", + "path": "wagtail/contrib/forms/views.py", + "file_name": "views.py", + "fun_name": "get_filtering", + "commit_message": "Fix warnings from flake8-comprehensions.", + "code": "def get_filtering(self):\n \n self.select_date_form = SelectDateForm(self.request.GET)\n result = {}\n if self.select_date_form.is_valid():\n date_from = self.select_date_form.cleaned_data.get('date_from')\n date_to = self.select_date_form.cleaned_data.get('date_to')\n if date_to:\n # careful: date_to must be increased by 1 day\n # as submit_time is a time so will always be greater\n date_to += datetime.timedelta(days=1)\n if date_from:\n result['submit_time__range'] = [date_from, date_to]\n else:\n result['submit_time__lte'] = date_to\n elif date_from:\n result['submit_time__gte'] = date_from\n return result\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 265, + "n_words": 58, + "vocab_size": 42, + "complexity": 5, + "nloc": 15, + "token_counts": 100, + "n_ast_nodes": 174, + "n_identifiers": 15, + "d_id": 15592, + "documentation": { + "docstring": " Return filering as a dict for submissions queryset ", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 67799, + "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", + "repo": "erpnext", + "path": "erpnext/stock/get_item_details.py", + "file_name": "get_item_details.py", + "fun_name": "get_so_reservation_for_item", + "commit_message": "style: format code with black", + "code": "def get_so_reservation_for_item(args):\n\treserved_so = None\n\tif args.get(\"against_sales_order\"):\n\t\tif get_reserved_qty_for_so(args.get(\"against_sales_order\"), args.get(\"item_code\")):\n\t\t\treserved_so = 
args.get(\"against_sales_order\")\n\telif args.get(\"against_sales_invoice\"):\n\t\tsales_order = frappe.db.sql(\n\t\t\t,\n\t\t\t(args.get(\"against_sales_invoice\"), args.get(\"item_code\")),\n\t\t)\n\t\tif sales_order and sales_order[0]:\n\t\t\tif get_reserved_qty_for_so(sales_order[0][0], args.get(\"item_code\")):\n\t\t\t\treserved_so = sales_order[0]\n\telif args.get(\"sales_order\"):\n\t\tif get_reserved_qty_for_so(args.get(\"sales_order\"), args.get(\"item_code\")):\n\t\t\treserved_so = args.get(\"sales_order\")\n\treturn reserved_so\n\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 25, + "n_words": 42, + "vocab_size": 26, + "complexity": 9, + "nloc": 18, + "token_counts": 146, + "n_ast_nodes": 253, + "n_identifiers": 9, + "d_id": 14622, + "documentation": { + "docstring": "select sales_order from `tabSales Invoice Item` where\n\t\tparent=%s and item_code=%s", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 248585, + "commit_id": "fe1daad67237c2154a3d8d8cdf6c603f0d33682e", + "repo": "synapse", + "path": "tests/util/test_macaroons.py", + "file_name": "test_macaroons.py", + "fun_name": "test_short_term_login_token", + "commit_message": "Move the \"email unsubscribe\" resource, refactor the macaroon generator & simplify the access token verification logic. (#12986)\n\nThis simplifies the access token verification logic by removing the `rights`\r\nparameter which was only ever used for the unsubscribe link in email\r\nnotifications. The latter has been moved under the `/_synapse` namespace,\r\nsince it is not a standard API.\r\n\r\nThis also makes the email verification link more secure, by embedding the\r\napp_id and pushkey in the macaroon and verifying it. 
This prevents the user\r\nfrom tampering the query parameters of that unsubscribe link.\r\n\r\nMacaroon generation is refactored:\r\n\r\n- Centralised all macaroon generation and verification logic to the\r\n `MacaroonGenerator`\r\n- Moved to `synapse.utils`\r\n- Changed the constructor to require only a `Clock`, hostname, and a secret key\r\n (instead of a full `Homeserver`).\r\n- Added tests for all methods.", + "code": "def test_short_term_login_token(self):\n \n token = self.macaroon_generator.generate_short_term_login_token(\n user_id=\"@user:tesths\",\n auth_provider_id=\"oidc\",\n auth_provider_session_id=\"sid\",\n duration_in_ms=2 * 60 * 1000,\n )\n\n info = self.macaroon_generator.verify_short_term_login_token(token)\n self.assertEqual(info.user_id, \"@user:tesths\")\n self.assertEqual(info.auth_provider_id, \"oidc\")\n self.assertEqual(info.auth_provider_session_id, \"sid\")\n\n # Raises with another secret key\n with self.assertRaises(MacaroonVerificationFailedException):\n self.other_macaroon_generator.verify_short_term_login_token(token)\n\n # Wait a minute\n self.reactor.pump([60])\n # Shouldn't raise\n self.macaroon_generator.verify_short_term_login_token(token)\n # Wait another minute\n self.reactor.pump([60])\n # Should raise since it expired\n with self.assertRaises(MacaroonVerificationFailedException):\n self.macaroon_generator.verify_short_term_login_token(token)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 240, + "n_words": 55, + "vocab_size": 39, + "complexity": 1, + "nloc": 18, + "token_counts": 135, + "n_ast_nodes": 233, + "n_identifiers": 17, + "d_id": 72365, + "documentation": { + "docstring": "Test the generation and verification of short-term login tokens", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 42601, + "commit_id": "74bb3c28ce9f2cd2be4cd9176747d59a0d67285d", + "repo": "nltk", + "path": "nltk/sentiment/vader.py", + "file_name": "vader.py", + "fun_name": "polarity_scores", + "commit_message": "Add a note stating that a hashtag is unsupported in VADER", + "code": "def polarity_scores(self, text):\n \n # text, words_and_emoticons, is_cap_diff = self.preprocess(text)\n sentitext = SentiText(\n text, self.constants.PUNC_LIST, self.constants.REGEX_REMOVE_PUNCTUATION\n )\n sentiments = []\n words_and_emoticons = sentitext.words_and_emoticons\n for item in words_and_emoticons:\n valence = 0\n i = words_and_emoticons.index(item)\n if (\n i < len(words_and_emoticons) - 1\n and item.lower() == \"kind\"\n and words_and_emoticons[i + 1].lower() == \"of\"\n ) or item.lower() in self.constants.BOOSTER_DICT:\n sentiments.append(valence)\n continue\n\n sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments)\n\n sentiments = self._but_check(words_and_emoticons, sentiments)\n\n return self.score_valence(sentiments, text)\n", + "url": "https://github.com/nltk/nltk.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 274, + "n_words": 70, + "vocab_size": 53, + "complexity": 6, + "nloc": 19, + "token_counts": 138, + "n_ast_nodes": 218, + "n_identifiers": 21, + "d_id": 7656, + "documentation": { + "docstring": "\n Return a float for sentiment strength based on the input text.\n Positive values are positive valence, negative value are negative\n valence.\n\n :note: Hashtags are not taken into consideration (e.g. #BAD is neutral). 
If you\n are interested in processing the text in the hashtags too, then we recommend\n preprocessing your data to remove the #, after which the hashtag text may be\n matched as if it was a normal word in the sentence.\n ", + "n_words": 72, + "vocab_size": 59, + "n_whitespaces": 141, + "language": "en" + } + }, + { + "id": 110450, + "commit_id": "7a1df7830f7685a99291d90c5e79bfc5e7876f31", + "repo": "matplotlib", + "path": "lib/mpl_toolkits/mplot3d/tests/test_axes3d.py", + "file_name": "test_axes3d.py", + "fun_name": "test_mutating_input_arrays_y_and_z", + "commit_message": "Test that plot results aren't affected by mutating input arrays", + "code": "def test_mutating_input_arrays_y_and_z(fig_test, fig_ref):\n \n ax1 = fig_test.add_subplot(111, projection='3d')\n x = [1, 2, 3]\n y = [0.0, 0.0, 0.0]\n z = [0.0, 0.0, 0.0]\n ax1.plot(x, y, z, 'o-')\n\n ax1.set_ylim([0, 4])\n ax1.set_zlim([0, 4])\n fig_test.draw_without_rendering()\n\n # mutate y,z to get a nontrivial line\n y[:] = [1, 2, 3]\n z[:] = [1, 2, 3]\n\n # draw the same plot without mutating x and y\n ax2 = fig_ref.add_subplot(111, projection='3d')\n x = [1, 2, 3]\n y = [0.0, 0.0, 0.0]\n z = [0.0, 0.0, 0.0]\n ax2.plot(x, y, z, 'o-')\n\n ax2.set_ylim([0, 4])\n ax2.set_zlim([0, 4])\n fig_test.draw_without_rendering()\n", + "url": "https://github.com/matplotlib/matplotlib.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 150, + "n_words": 87, + "vocab_size": 46, + "complexity": 1, + "nloc": 19, + "token_counts": 208, + "n_ast_nodes": 277, + "n_identifiers": 14, + "d_id": 24166, + "documentation": { + "docstring": "\n Test to see if the `z` axis does not get mutated\n after a call to `Axes3D.plot`\n\n test cases came from GH#8990\n ", + "n_words": 21, + "vocab_size": 20, + "n_whitespaces": 34, + "language": "en" + } + }, + { + "id": 45441, + "commit_id": "bb26f96665567325a7fbb810249820e7dac0322a", + "repo": "airflow", + "path": "airflow/www/views.py", + "file_name": "views.py", + "fun_name": "dag_edges", + "commit_message": "Make Grid and and Graph view work with task mapping (#21740)\n\n* Expand mapped tasks in the Scheduler\r\n\r\nTechnically this is done inside\r\nDagRun.task_instance_scheduling_decisions, but the only place that is\r\ncurrently called is the Scheduler\r\n\r\nThe way we are getting `upstream_ti` to pass to expand_mapped_task is\r\nall sorts of wrong and will need fixing, I think the interface for that\r\nmethod is wrong and the mapped task should be responsible for finding\r\nthe right upstream TI itself.\r\n\r\n* make UI and tree work with mapped tasks\r\n\r\n* add graph tooltip and map count\r\n\r\n* simplify node label redraw logic\r\n\r\n* add utils.js and map_index to /taskInstances\r\n\r\n* use TaskInstanceState instead of strings\r\n\r\n* move map_index on /taskinstance to separate PR\r\n\r\n* check to use Task or Tasks\r\n\r\n* remove `no_status` and use TaskInstanceState\r\n\r\nCo-authored-by: Ash Berlin-Taylor ", + "code": "def dag_edges(dag):\n \n # Edges to add between TaskGroup\n edges_to_add = set()\n # Edges to remove between individual tasks that are replaced by edges_to_add.\n edges_to_skip = set()\n\n task_group_map = dag.task_group.get_task_group_dict()\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 47, + "n_words": 29, + "vocab_size": 22, + "complexity": 4, + "nloc": 18, + "token_counts": 115, + "n_ast_nodes": 48, + "n_identifiers": 
8, + "d_id": 8568, + "documentation": { + "docstring": "\n Create the list of edges needed to construct the Graph view.\n\n A special case is made if a TaskGroup is immediately upstream/downstream of another\n TaskGroup or task. Two dummy nodes named upstream_join_id and downstream_join_id are\n created for the TaskGroup. Instead of drawing an edge onto every task in the TaskGroup,\n all edges are directed onto the dummy nodes. This is to cut down the number of edges on\n the graph.\n\n For example: A DAG with TaskGroups group1 and group2:\n group1: task1, task2, task3\n group2: task4, task5, task6\n\n group2 is downstream of group1:\n group1 >> group2\n\n Edges to add (This avoids having to create edges between every task in group1 and group2):\n task1 >> downstream_join_id\n task2 >> downstream_join_id\n task3 >> downstream_join_id\n downstream_join_id >> upstream_join_id\n upstream_join_id >> task4\n upstream_join_id >> task5\n upstream_join_id >> task6\n ", + "n_words": 132, + "vocab_size": 81, + "n_whitespaces": 233, + "language": "en" + } + }, + { + "id": 37492, + "commit_id": "57e6464ac9a31156f1c93e59107323e6ec01309e", + "repo": "transformers", + "path": "src/transformers/testing_utils.py", + "file_name": "testing_utils.py", + "fun_name": "require_tokenizers", + "commit_message": "Update all require decorators to use skipUnless when possible (#16999)", + "code": "def require_tokenizers(test_case):\n \n return unittest.skipUnless(is_tokenizers_available(), \"test requires tokenizers\")(test_case)\n\n", + "url": "https://github.com/huggingface/transformers.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 13, + "n_words": 7, + "vocab_size": 7, + "complexity": 1, + "nloc": 2, + "token_counts": 20, + "n_ast_nodes": 37, + "n_identifiers": 5, + "d_id": 6797, + "documentation": { + "docstring": "\n Decorator marking a test that requires 🤗 Tokenizers. 
These tests are skipped when 🤗 Tokenizers isn't installed.\n ", + "n_words": 17, + "vocab_size": 16, + "n_whitespaces": 24, + "language": "en" + } + }, + { + "id": 291628, + "commit_id": "1c0f9cf941f77d6e3d299f98d5174f0a2953f236", + "repo": "core", + "path": "homeassistant/components/overkiz/water_heater_entities/hitachi_dhw.py", + "file_name": "hitachi_dhw.py", + "fun_name": "current_operation", + "commit_message": "Add Overkiz Hitachi DHW (#81536)\n\n* Port ha-tahome hitachi dhw\r\n\r\n* Use int for setting temperature\r\n\r\n* Use value as float when possible\r\n\r\n* Use device state for current operation\r\n\r\n* Update homeassistant/components/overkiz/water_heater_entities/hitachi_dhw.py\r\n\r\nCo-authored-by: Quentame \r\n\r\n* Update homeassistant/components/overkiz/water_heater_entities/hitachi_dhw.py\r\n\r\nCo-authored-by: Quentame \r\n\r\n* Use ON instead of ECO for standard operation mode\r\n\r\nCo-authored-by: Quentame ", + "code": "def current_operation(self) -> str | None:\n \n modbus_control = self.device.states[OverkizState.MODBUS_CONTROL_DHW]\n if modbus_control and modbus_control.value_as_str == OverkizCommandParam.STOP:\n return STATE_OFF\n\n current_mode = self.device.states[OverkizState.MODBUS_DHW_MODE]\n if current_mode and current_mode.value_as_str in OVERKIZ_TO_OPERATION_MODE:\n return OVERKIZ_TO_OPERATION_MODE[current_mode.value_as_str]\n\n return None\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 94, + "n_words": 30, + "vocab_size": 23, + "complexity": 5, + "nloc": 9, + "token_counts": 65, + "n_ast_nodes": 102, + "n_identifiers": 15, + "d_id": 90732, + "documentation": { + "docstring": "Return current operation ie. 
eco, electric, performance, ...", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 42453, + "commit_id": "8ffd0d8190552d45f8b92e18da3fc41639e5185d", + "repo": "nltk", + "path": "nltk/corpus/reader/wordnet.py", + "file_name": "wordnet.py", + "fun_name": "add_provs", + "commit_message": "Initialize empty provenance for default English", + "code": "def add_provs(self, reader):\n \n fileids = reader.fileids()\n for fileid in fileids:\n prov, langfile = os.path.split(fileid)\n file_name, file_extension = os.path.splitext(langfile)\n if file_extension == \".tab\":\n lang = file_name.split(\"-\")[-1]\n if lang in self.provenances.keys():\n # We already have another resource for this lang,\n # so we need to further specify the lang id:\n lang = f\"{lang}_{prov}\"\n self.provenances[lang] = prov\n", + "url": "https://github.com/nltk/nltk.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 210, + "n_words": 54, + "vocab_size": 41, + "complexity": 4, + "nloc": 10, + "token_counts": 84, + "n_ast_nodes": 150, + "n_identifiers": 16, + "d_id": 7546, + "documentation": { + "docstring": "Add languages from Multilingual Wordnet to the provenance dictionary", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 20229, + "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", + "repo": "pipenv", + "path": "pipenv/patched/notpip/_vendor/platformdirs/unix.py", + "file_name": "unix.py", + "fun_name": "site_data_dir", + "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", + "code": "def site_data_dir(self) -> str:\n \n # XDG default for $XDG_DATA_DIRS; only first, if multipath is False\n path = os.environ.get(\"XDG_DATA_DIRS\", \"\")\n if not path.strip():\n path = f\"/usr/local/share{os.pathsep}/usr/share\"\n return self._with_multi_path(path)\n", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 73, + "n_words": 27, + "vocab_size": 24, + "complexity": 2, + "nloc": 10, + "token_counts": 39, + "n_ast_nodes": 78, + "n_identifiers": 10, + "d_id": 3281, + "documentation": { + "docstring": "\n :return: data directories shared by users (if `multipath ` is\n enabled and ``XDG_DATA_DIR`` is set and a multi path the response is also a multi path separated by the OS\n path separator), e.g. 
``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version``\n ", + "n_words": 36, + "vocab_size": 27, + "n_whitespaces": 67, + "language": "en" + } + }, + { + "id": 197540, + "commit_id": "7fe8e027ae1d7f683243c0229b961671a6cbb4c5", + "repo": "sympy", + "path": "sympy/stats/stochastic_process_types.py", + "file_name": "stochastic_process_types.py", + "fun_name": "absorbing_probabilities", + "commit_message": "Improved some documentation in the stats module", + "code": "def absorbing_probabilities(self):\n \n _, _, R, _ = self.decompose()\n N = self.fundamental_matrix()\n if R is None or N is None:\n return None\n return N*R\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 69, + "n_words": 23, + "vocab_size": 17, + "complexity": 3, + "nloc": 6, + "token_counts": 41, + "n_ast_nodes": 67, + "n_identifiers": 7, + "d_id": 48618, + "documentation": { + "docstring": "\n Computes the absorbing probabilities, i.e.\n the ij-th entry of the matrix denotes the\n probability of Markov chain being absorbed\n in state j starting from state i.\n ", + "n_words": 26, + "vocab_size": 21, + "n_whitespaces": 62, + "language": "en" + } + }, + { + "id": 20520, + "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", + "repo": "pipenv", + "path": "pipenv/patched/notpip/_vendor/pygments/util.py", + "file_name": "util.py", + "fun_name": "doctype_matches", + "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", + "code": "def doctype_matches(text, regex):\n \n m = doctype_lookup_re.search(text)\n if m is None:\n return False\n doctype = m.group(1)\n return re.compile(regex, re.I).match(doctype.strip()) is not None\n\n", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 43, + "n_words": 21, + "vocab_size": 17, + "complexity": 2, + "nloc": 6, + "token_counts": 54, + "n_ast_nodes": 87, + "n_identifiers": 13, + "d_id": 3407, + "documentation": { + "docstring": "Check if the doctype matches a regular expression (if present).\n\n Note that this method only checks the first part of a DOCTYPE.\n eg: 'html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"'\n ", + "n_words": 29, + "vocab_size": 27, + "n_whitespaces": 38, + "language": "en" + } + }, + { + "id": 100170, + "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", + "repo": "sentry", + "path": "tests/sentry/data_export/endpoints/test_data_export.py", + "file_name": "test_data_export.py", + "fun_name": "test_converts_stats_period_start_end", + "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", + "code": "def test_converts_stats_period_start_end(self):\n \n payload = self.make_payload(\"discover\", {\"statsPeriodStart\": \"1w\", \"statsPeriodEnd\": \"5d\"})\n with self.feature(\"organizations:discover-query\"):\n response = self.get_success_response(self.org.slug, status_code=201, 
**payload)\n data_export = ExportedData.objects.get(id=response.data[\"id\"])\n query_info = data_export.query_info\n assert parse_datetime_string(query_info[\"start\"]) == parse_datetime_string(\n \"2020-05-12T14:00:00\"\n )\n assert parse_datetime_string(query_info[\"end\"]) == parse_datetime_string(\n \"2020-05-14T14:00:00\"\n )\n assert \"statsPeriod\" not in query_info\n assert \"statsPeriodStart\" not in query_info\n assert \"statsPeriodSEnd\" not in query_info\n", + "url": "https://github.com/getsentry/sentry.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 166, + "n_words": 49, + "vocab_size": 32, + "complexity": 1, + "nloc": 15, + "token_counts": 114, + "n_ast_nodes": 205, + "n_identifiers": 18, + "d_id": 19764, + "documentation": { + "docstring": "\n Ensures that statsPeriodStart and statsPeriodEnd is converted to start/end.\n ", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 24, + "language": "en" + } + }, + { + "id": 203593, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/contrib/auth/admin.py", + "file_name": "admin.py", + "fun_name": "response_add", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def response_add(self, request, obj, post_url_continue=None):\n \n # We should allow further modification of the user just added i.e. the\n # 'Save' button should behave like the 'Save and continue editing'\n # button except in two scenarios:\n # * The user has pressed the 'Save and add another' button\n # * We are adding a user in a popup\n if \"_addanother\" not in request.POST and IS_POPUP_VAR not in request.POST:\n request.POST = request.POST.copy()\n request.POST[\"_continue\"] = 1\n return super().response_add(request, obj, post_url_continue)\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 155, + "n_words": 77, + "vocab_size": 52, + "complexity": 3, + "nloc": 5, + "token_counts": 61, + "n_ast_nodes": 102, + "n_identifiers": 9, + "d_id": 50467, + "documentation": { + "docstring": "\n Determine the HttpResponse for the add_view stage. 
It mostly defers to\n its superclass implementation but is customized because the User model\n has a slightly different workflow.\n ", + "n_words": 26, + "vocab_size": 24, + "n_whitespaces": 55, + "language": "en" + } + }, + { + "id": 296690, + "commit_id": "b0ed42a5a58976ebe82b5bbbb60c499648a1718b", + "repo": "core", + "path": "tests/components/daikin/test_temperature_format.py", + "file_name": "test_temperature_format.py", + "fun_name": "test_decimal_conversion_more_digits", + "commit_message": "Fix #69952: Daikin AC Temperature jumps after being set (#70326)", + "code": "def test_decimal_conversion_more_digits():\n \n formatted = format_target_temperature(\"16.09\")\n assert formatted == \"16.1\"\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 18, + "n_words": 9, + "vocab_size": 8, + "complexity": 1, + "nloc": 3, + "token_counts": 15, + "n_ast_nodes": 32, + "n_identifiers": 3, + "d_id": 95665, + "documentation": { + "docstring": "Check at most 1 decimal is kept when target temp is a decimal with more than 1 decimal.", + "n_words": 18, + "vocab_size": 15, + "n_whitespaces": 17, + "language": "en" + } + }, + { + "id": 70608, + "commit_id": "60ba39ffb5ec6d760efa6e2ecbff7ede53b12464", + "repo": "wagtail", + "path": "wagtail/admin/views/workflows.py", + "file_name": "workflows.py", + "fun_name": "get_task_chosen_response", + "commit_message": "replace get_task_result_data helper with more useful one get_task_chosen_response", + "code": "def get_task_chosen_response(request, task):\n \n result_data = {\n 'id': task.id,\n 'name': task.name,\n 'edit_url': reverse('wagtailadmin_workflows:edit_task', args=[task.id]),\n }\n return render_modal_workflow(\n request, None, None,\n None, json_data={'step': 'task_chosen', 'result': result_data}\n )\n\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 75, + "n_words": 25, + "vocab_size": 23, + "complexity": 1, + "nloc": 10, + "token_counts": 62, + "n_ast_nodes": 103, + "n_identifiers": 10, + "d_id": 15531, + "documentation": { + "docstring": "\n helper function: given a task, return the response indicating that it has been chosen\n ", + "n_words": 14, + "vocab_size": 14, + "n_whitespaces": 21, + "language": "en" + } + }, + { + "id": 176513, + "commit_id": "c3e1e7f4c6a4edb968494cd4775574ad26f2a96b", + "repo": "networkx", + "path": "networkx/algorithms/matching.py", + "file_name": "matching.py", + "fun_name": "min_weight_matching", + "commit_message": "Fix min_weight_matching to convert edge weights without reciprocal (#5394)\n\n* Add test and then fix code and docs\r\n\r\n* Correct and improve docs. 
Change 1e-6 to 1 to maintain integers.\r\n\r\nInclude argument in docstring for why adding the 1 doesn't impact the min", + "code": "def min_weight_matching(G, maxcardinality=None, weight=\"weight\"):\n \n if maxcardinality not in (True, None):\n raise nx.NetworkXError(\n \"The argument maxcardinality does not make sense \"\n \"in the context of minimum weight matchings.\"\n \"It is deprecated and will be removed in v3.0.\"\n )\n if len(G.edges) == 0:\n return max_weight_matching(G, maxcardinality=True, weight=weight)\n G_edges = G.edges(data=weight, default=1)\n max_weight = 1 + max(w for _, _, w in G_edges)\n InvG = nx.Graph()\n edges = ((u, v, max_weight - w) for u, v, w in G_edges)\n InvG.add_weighted_edges_from(edges, weight=weight)\n return max_weight_matching(InvG, maxcardinality=True, weight=weight)\n\n\n@not_implemented_for(\"multigraph\")\n@not_implemented_for(\"directed\")", + "url": "https://github.com/networkx/networkx.git", + "language": "Python", + "ast_errors": "@not_implemented_for(\"multigraph\")\n@not_implemented_for(\"directed\")", + "n_ast_errors": 1, + "ast_levels": 11, + "n_whitespaces": 163, + "n_words": 84, + "vocab_size": 65, + "complexity": 5, + "nloc": 15, + "token_counts": 137, + "n_ast_nodes": 231, + "n_identifiers": 22, + "d_id": 41940, + "documentation": { + "docstring": "Computing a minimum-weight maximal matching of G.\n\n Use the maximum-weight algorithm with edge weights subtracted\n from the maximum weight of all edges.\n\n A matching is a subset of edges in which no node occurs more than once.\n The weight of a matching is the sum of the weights of its edges.\n A maximal matching cannot add more edges and still be a matching.\n The cardinality of a matching is the number of matched edges.\n\n This method replaces the edge weights with 1 plus the maximum edge weight\n minus the original edge weight.\n\n new_weight = (max_weight + 1) - edge_weight\n\n then runs :func:`max_weight_matching` with the new weights.\n The max weight matching with these new weights corresponds\n to the min weight matching using the original weights.\n Adding 1 to the max edge weight keeps all edge weights positive\n and as integers if they started as integers.\n\n You might worry that adding 1 to each weight would make the algorithm\n favor matchings with more edges. But we use the parameter\n `maxcardinality=True` in `max_weight_matching` to ensure that the\n number of edges in the competing matchings are the same and thus\n the optimum does not change due to changes in the number of edges.\n\n Read the documentation of `max_weight_matching` for more information.\n\n Parameters\n ----------\n G : NetworkX graph\n Undirected graph\n\n maxcardinality: bool\n .. 
deprecated:: 2.8\n The `maxcardinality` parameter will be removed in v3.0.\n It doesn't make sense to set it to False when looking for\n a min weight matching because then we just return no edges.\n\n If maxcardinality is True, compute the maximum-cardinality matching\n with minimum weight among all maximum-cardinality matchings.\n\n weight: string, optional (default='weight')\n Edge data key corresponding to the edge weight.\n If key not found, uses 1 as weight.\n\n Returns\n -------\n matching : set\n A minimal weight matching of the graph.\n\n See Also\n --------\n max_weight_matching\n ", + "n_words": 302, + "vocab_size": 163, + "n_whitespaces": 476, + "language": "en" + } + }, + { + "id": 216056, + "commit_id": "a5679caf65c7c79cd72841b6e5793b9b693744c9", + "repo": "salt", + "path": "salt/cloud/clouds/proxmox.py", + "file_name": "proxmox.py", + "fun_name": "_find_agent_ip", + "commit_message": "Add support for get IP-address from agent", + "code": "def _find_agent_ip(vm_, vmid):\n \n\n # This functionality is only available on qemu\n if not vm_.get(\"technology\") == \"qemu\":\n log.warning(\"Find agent IP is only available under `qemu`\")\n return\n\n # Create an empty list of IP-addresses:\n ips = []\n\n endpoint = \"nodes/{}/qemu/{}/agent/network-get-interfaces\".format(vm_[\"host\"], vmid)\n interfaces = query(\"get\", endpoint)\n\n # If we get a result from the agent, parse it\n for interface in interfaces[\"result\"]:\n\n # Skip interface if hardware-address is 00:00:00:00:00:00 (loopback interface)\n if str(interface.get(\"hardware-address\")) == \"00:00:00:00:00:00\":\n continue\n\n # Skip entries without ip-addresses information\n if \"ip-addresses\" not in interface:\n continue\n\n for if_addr in interface[\"ip-addresses\"]:\n ip_addr = if_addr.get(\"ip-address\")\n if ip_addr is not None:\n ips.append(str(ip_addr))\n\n if len(ips) > 0:\n return preferred_ip(vm_, ips)\n\n raise SaltCloudExecutionFailure\n\n", + "url": "https://github.com/saltstack/salt.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 254, + "n_words": 106, + "vocab_size": 78, + "complexity": 8, + "nloc": 19, + "token_counts": 128, + "n_ast_nodes": 231, + "n_identifiers": 19, + "d_id": 54362, + "documentation": { + "docstring": "\n If VM is started we would return the IP-addresses that are returned by the qemu agent on the VM.\n ", + "n_words": 19, + "vocab_size": 17, + "n_whitespaces": 26, + "language": "en" + } + }, + { + "id": 215737, + "commit_id": "3bb43882e727b1d36abe2e501759c9c5e9048ecf", + "repo": "salt", + "path": "tests/pytests/unit/utils/win_dacl/test_get_name.py", + "file_name": "test_get_name.py", + "fun_name": "test_get_name_error", + "commit_message": "Add tests, migrate some tests to pytest", + "code": "def test_get_name_error():\n \n test_sid = \"S-1-2-3-4\"\n sid_obj = win32security.ConvertStringSidToSid(test_sid)\n with pytest.raises(salt.exceptions.CommandExecutionError) as exc:\n salt.utils.win_dacl.get_name(sid_obj)\n assert \"No mapping between account names\" in exc.value.message\n", + "url": "https://github.com/saltstack/salt.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 43, + "n_words": 21, + "vocab_size": 20, + "complexity": 1, + "nloc": 6, + "token_counts": 48, + "n_ast_nodes": 87, + "n_identifiers": 16, + "d_id": 54131, + "documentation": { + "docstring": "\n Test get_name with an un mapped SID, should throw a CommandExecutionError\n ", + "n_words": 11, + 
"vocab_size": 11, + "n_whitespaces": 18, + "language": "en" + } + }, + { + "id": 205195, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/db/backends/sqlite3/introspection.py", + "file_name": "introspection.py", + "fun_name": "get_relations", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def get_relations(self, cursor, table_name):\n \n cursor.execute(\n \"PRAGMA foreign_key_list(%s)\" % self.connection.ops.quote_name(table_name)\n )\n return {\n column_name: (ref_column_name, ref_table_name)\n for _, _, ref_table_name, column_name, ref_column_name, *_ in cursor.fetchall()\n }\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 93, + "n_words": 25, + "vocab_size": 24, + "complexity": 2, + "nloc": 8, + "token_counts": 56, + "n_ast_nodes": 85, + "n_identifiers": 13, + "d_id": 51029, + "documentation": { + "docstring": "\n Return a dictionary of {column_name: (ref_column_name, ref_table_name)}\n representing all foreign keys in the given table.\n ", + "n_words": 15, + "vocab_size": 15, + "n_whitespaces": 37, + "language": "en" + } + }, + { + "id": 247442, + "commit_id": "26211fec24d8d0a967de33147e148166359ec8cb", + "repo": "synapse", + "path": "synapse/storage/background_updates.py", + "file_name": "background_updates.py", + "fun_name": "average_items_per_ms", + "commit_message": "Fix a bug in background updates wherein background updates are never run using the default batch size (#12157)", + "code": "def average_items_per_ms(self) -> Optional[float]:\n \n # We want to return None if this is the first background update item\n if self.total_item_count == 0:\n return None\n # Avoid dividing by zero\n elif self.avg_duration_ms == 0:\n return 0\n else:\n # Use the exponential moving average so that we can adapt to\n # changes in how long the update process takes.\n return float(self.avg_item_count) / float(self.avg_duration_ms)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 158, + "n_words": 61, + "vocab_size": 47, + "complexity": 3, + "nloc": 11, + "token_counts": 45, + "n_ast_nodes": 78, + "n_identifiers": 7, + "d_id": 71674, + "documentation": { + "docstring": "An estimate of how long it takes to do a single update.\n Returns:\n A duration in ms as a float\n ", + "n_words": 20, + "vocab_size": 19, + "n_whitespaces": 45, + "language": "en" + } + }, + { + "id": 307669, + "commit_id": "26251895295d74fcd2c73e37804c23675c433247", + "repo": "core", + "path": "homeassistant/components/forked_daapd/media_player.py", + "file_name": "media_player.py", + "fun_name": "_pause_and_wait_for_callback", + "commit_message": "Use async_timeout in forked_daapd (#78451)", + "code": "async def _pause_and_wait_for_callback(self):\n \n self._pause_requested = True\n await self.async_media_pause()\n try:", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "async def _pause_and_wait_for_callback(self):\n \"\"\"Send pause and wait for the pause callback to be received.\"\"\"\n self._pause_requested = True\n await self.async_media_pause()\n try:", + "n_ast_errors": 1, + "ast_levels": 7, + "n_whitespaces": 37, + "n_words": 9, + "vocab_size": 9, + "complexity": 2, + "nloc": 9, + "token_counts": 53, + "n_ast_nodes": 34, + "n_identifiers": 4, + "d_id": 106437, + "documentation": { + 
"docstring": "Send pause and wait for the pause callback to be received.", + "n_words": 11, + "vocab_size": 10, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 212865, + "commit_id": "07bb93d47f01468660a01f42150e87e5cb08d546", + "repo": "PySimpleGUI", + "path": "PySimpleGUI.py", + "file_name": "PySimpleGUI.py", + "fun_name": "set_options", + "commit_message": "Addition of tooltip_offset parm to set_options call (major hack to get around 8.6.12 problem). Backed out the experiments to try and fix new problem with Ubuntu", + "code": "def set_options(icon=None, button_color=None, element_size=(None, None), button_element_size=(None, None),\n margins=(None, None),\n element_padding=(None, None), auto_size_text=None, auto_size_buttons=None, font=None, border_width=None,\n slider_border_width=None, slider_relief=None, slider_orientation=None,\n autoclose_time=None, message_box_line_width=None,\n progress_meter_border_depth=None, progress_meter_style=None,\n progress_meter_relief=None, progress_meter_color=None, progress_meter_size=None,\n text_justification=None, background_color=None, element_background_color=None,\n text_element_background_color=None, input_elements_background_color=None, input_text_color=None,\n scrollbar_color=None, text_color=None, element_text_color=None, debug_win_size=(None, None),\n window_location=(None, None), error_button_color=(None, None), tooltip_time=None, tooltip_font=None, use_ttk_buttons=None, ttk_theme=None,\n suppress_error_popups=None, suppress_raise_key_errors=None, suppress_key_guessing=None,warn_button_key_duplicates=False, enable_treeview_869_patch=None,\n enable_mac_notitlebar_patch=None, use_custom_titlebar=None, titlebar_background_color=None, titlebar_text_color=None, titlebar_font=None,\n titlebar_icon=None, user_settings_path=None, pysimplegui_settings_path=None, pysimplegui_settings_filename=None, keep_on_top=None, dpi_awareness=None, scaling=None, disable_modal_windows=None, tooltip_offset=(None, None)):\n \n\n global DEFAULT_ELEMENT_SIZE\n global DEFAULT_BUTTON_ELEMENT_SIZE\n global DEFAULT_MARGINS # Margins for each LEFT/RIGHT margin is first term\n global DEFAULT_ELEMENT_PADDING # Padding between elements (row, col) in pixels\n global DEFAULT_AUTOSIZE_TEXT\n global DEFAULT_AUTOSIZE_BUTTONS\n global DEFAULT_FONT\n global DEFAULT_BORDER_WIDTH\n global DEFAULT_AUTOCLOSE_TIME\n global DEFAULT_BUTTON_COLOR\n global MESSAGE_BOX_LINE_WIDTH\n global DEFAULT_PROGRESS_BAR_BORDER_WIDTH\n global DEFAULT_PROGRESS_BAR_STYLE\n global DEFAULT_PROGRESS_BAR_RELIEF\n global DEFAULT_PROGRESS_BAR_COLOR\n global DEFAULT_PROGRESS_BAR_SIZE\n global DEFAULT_TEXT_JUSTIFICATION\n global DEFAULT_DEBUG_WINDOW_SIZE\n global DEFAULT_SLIDER_BORDER_WIDTH\n global DEFAULT_SLIDER_RELIEF\n global DEFAULT_SLIDER_ORIENTATION\n global DEFAULT_BACKGROUND_COLOR\n global DEFAULT_INPUT_ELEMENTS_COLOR\n global DEFAULT_ELEMENT_BACKGROUND_COLOR\n global DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR\n global DEFAULT_SCROLLBAR_COLOR\n global DEFAULT_TEXT_COLOR\n global DEFAULT_WINDOW_LOCATION\n global DEFAULT_ELEMENT_TEXT_COLOR\n global DEFAULT_INPUT_TEXT_COLOR\n global DEFAULT_TOOLTIP_TIME\n global DEFAULT_ERROR_BUTTON_COLOR\n global DEFAULT_TTK_THEME\n global USE_TTK_BUTTONS\n global TOOLTIP_FONT\n global SUPPRESS_ERROR_POPUPS\n global SUPPRESS_RAISE_KEY_ERRORS\n global SUPPRESS_KEY_GUESSING\n global WARN_DUPLICATE_BUTTON_KEY_ERRORS\n global ENABLE_TREEVIEW_869_PATCH\n global ENABLE_MAC_NOTITLEBAR_PATCH\n global USE_CUSTOM_TITLEBAR\n global CUSTOM_TITLEBAR_BACKGROUND_COLOR\n global 
CUSTOM_TITLEBAR_TEXT_COLOR\n global CUSTOM_TITLEBAR_ICON\n global CUSTOM_TITLEBAR_FONT\n global DEFAULT_USER_SETTINGS_PATH\n global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH\n global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME\n global DEFAULT_KEEP_ON_TOP\n global DEFAULT_SCALING\n global DEFAULT_MODAL_WINDOWS_ENABLED\n global DEFAULT_TOOLTIP_OFFSET\n global _pysimplegui_user_settings\n # global _my_windows\n\n if icon:\n Window._user_defined_icon = icon\n # _my_windows._user_defined_icon = icon\n\n if button_color != None:\n if button_color == COLOR_SYSTEM_DEFAULT:\n DEFAULT_BUTTON_COLOR = (COLOR_SYSTEM_DEFAULT, COLOR_SYSTEM_DEFAULT)\n else:\n DEFAULT_BUTTON_COLOR = button_color\n\n if element_size != (None, None):\n DEFAULT_ELEMENT_SIZE = element_size\n\n if button_element_size != (None, None):\n DEFAULT_BUTTON_ELEMENT_SIZE = button_element_size\n\n if margins != (None, None):\n DEFAULT_MARGINS = margins\n\n if element_padding != (None, None):\n DEFAULT_ELEMENT_PADDING = element_padding\n\n if auto_size_text != None:\n DEFAULT_AUTOSIZE_TEXT = auto_size_text\n\n if auto_size_buttons != None:\n DEFAULT_AUTOSIZE_BUTTONS = auto_size_buttons\n\n if font != None:\n DEFAULT_FONT = font\n\n if border_width != None:\n DEFAULT_BORDER_WIDTH = border_width\n\n if autoclose_time != None:\n DEFAULT_AUTOCLOSE_TIME = autoclose_time\n\n if message_box_line_width != None:\n MESSAGE_BOX_LINE_WIDTH = message_box_line_width\n\n if progress_meter_border_depth != None:\n DEFAULT_PROGRESS_BAR_BORDER_WIDTH = progress_meter_border_depth\n\n if progress_meter_style != None:\n warnings.warn('You can no longer set a progress bar style. All ttk styles must be the same for the window', UserWarning)\n # DEFAULT_PROGRESS_BAR_STYLE = progress_meter_style\n\n if progress_meter_relief != None:\n DEFAULT_PROGRESS_BAR_RELIEF = progress_meter_relief\n\n if progress_meter_color != None:\n DEFAULT_PROGRESS_BAR_COLOR = progress_meter_color\n\n if progress_meter_size != None:\n DEFAULT_PROGRESS_BAR_SIZE = progress_meter_size\n\n if slider_border_width != None:\n DEFAULT_SLIDER_BORDER_WIDTH = slider_border_width\n\n if slider_orientation != None:\n DEFAULT_SLIDER_ORIENTATION = slider_orientation\n\n if slider_relief != None:\n DEFAULT_SLIDER_RELIEF = slider_relief\n\n if text_justification != None:\n DEFAULT_TEXT_JUSTIFICATION = text_justification\n\n if background_color != None:\n DEFAULT_BACKGROUND_COLOR = background_color\n\n if text_element_background_color != None:\n DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR = text_element_background_color\n\n if input_elements_background_color != None:\n DEFAULT_INPUT_ELEMENTS_COLOR = input_elements_background_color\n\n if element_background_color != None:\n DEFAULT_ELEMENT_BACKGROUND_COLOR = element_background_color\n\n if window_location != (None, None):\n DEFAULT_WINDOW_LOCATION = window_location\n\n if debug_win_size != (None, None):\n DEFAULT_DEBUG_WINDOW_SIZE = debug_win_size\n\n if text_color != None:\n DEFAULT_TEXT_COLOR = text_color\n\n if scrollbar_color != None:\n DEFAULT_SCROLLBAR_COLOR = scrollbar_color\n\n if element_text_color != None:\n DEFAULT_ELEMENT_TEXT_COLOR = element_text_color\n\n if input_text_color is not None:\n DEFAULT_INPUT_TEXT_COLOR = input_text_color\n\n if tooltip_time is not None:\n DEFAULT_TOOLTIP_TIME = tooltip_time\n\n if error_button_color != (None, None):\n DEFAULT_ERROR_BUTTON_COLOR = error_button_color\n\n if ttk_theme is not None:\n DEFAULT_TTK_THEME = ttk_theme\n\n if use_ttk_buttons is not None:\n USE_TTK_BUTTONS = use_ttk_buttons\n\n if tooltip_font is not 
None:\n TOOLTIP_FONT = tooltip_font\n\n if suppress_error_popups is not None:\n SUPPRESS_ERROR_POPUPS = suppress_error_popups\n\n if suppress_raise_key_errors is not None:\n SUPPRESS_RAISE_KEY_ERRORS = suppress_raise_key_errors\n\n if suppress_key_guessing is not None:\n SUPPRESS_KEY_GUESSING = suppress_key_guessing\n\n if warn_button_key_duplicates is not None:\n WARN_DUPLICATE_BUTTON_KEY_ERRORS = warn_button_key_duplicates\n\n if enable_treeview_869_patch is not None:\n ENABLE_TREEVIEW_869_PATCH = enable_treeview_869_patch\n\n if enable_mac_notitlebar_patch is not None:\n ENABLE_MAC_NOTITLEBAR_PATCH = enable_mac_notitlebar_patch\n\n if use_custom_titlebar is not None:\n USE_CUSTOM_TITLEBAR = use_custom_titlebar\n\n if titlebar_background_color is not None:\n CUSTOM_TITLEBAR_BACKGROUND_COLOR = titlebar_background_color\n\n if titlebar_text_color is not None:\n CUSTOM_TITLEBAR_TEXT_COLOR = titlebar_text_color\n\n if titlebar_font is not None:\n CUSTOM_TITLEBAR_FONT = titlebar_font\n\n if titlebar_icon is not None:\n CUSTOM_TITLEBAR_ICON = titlebar_icon\n\n if user_settings_path is not None:\n DEFAULT_USER_SETTINGS_PATH = user_settings_path\n\n if pysimplegui_settings_path is not None:\n DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH = pysimplegui_settings_path\n\n if pysimplegui_settings_filename is not None:\n DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME = pysimplegui_settings_filename\n\n if pysimplegui_settings_filename is not None or pysimplegui_settings_filename is not None:\n _pysimplegui_user_settings = UserSettings(filename=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME,\n path=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH)\n\n if keep_on_top is not None:\n DEFAULT_KEEP_ON_TOP = keep_on_top\n\n if dpi_awareness is True:\n if running_windows():\n if platform.release() == \"7\":\n ctypes.windll.user32.SetProcessDPIAware()\n elif platform.release() == \"8\" or platform.release() == \"10\":\n ctypes.windll.shcore.SetProcessDpiAwareness(1)\n\n if scaling is not None:\n DEFAULT_SCALING = scaling\n\n if disable_modal_windows is not None:\n DEFAULT_MODAL_WINDOWS_ENABLED = not disable_modal_windows\n\n if tooltip_offset != (None, None):\n DEFAULT_TOOLTIP_OFFSET = tooltip_offset\n\n return True\n\n\n# ----------------------------------------------------------------- #\n\n# .########.##.....##.########.##.....##.########..######.\n# ....##....##.....##.##.......###...###.##.......##....##\n# ....##....##.....##.##.......####.####.##.......##......\n# ....##....#########.######...##.###.##.######....######.\n# ....##....##.....##.##.......##.....##.##.............##\n# ....##....##.....##.##.......##.....##.##.......##....##\n# ....##....##.....##.########.##.....##.########..######.\n\n# ----------------------------------------------------------------- #\n\n# The official Theme code\n\n#################### ChangeLookAndFeel #######################\n# Predefined settings that will change the colors and styles #\n# of the elements. 
#\n##############################################################\nLOOK_AND_FEEL_TABLE = {\n \"SystemDefault\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT, \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT,\n \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1,\n \"SLIDER_DEPTH\": 1, \"PROGRESS_DEPTH\": 0, },\n \"SystemDefaultForReal\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT,\n \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT, \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": COLOR_SYSTEM_DEFAULT,\n \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1, \"PROGRESS_DEPTH\": 0, },\n \"SystemDefault1\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT, \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT,\n \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": COLOR_SYSTEM_DEFAULT, \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1,\n \"PROGRESS_DEPTH\": 0, },\n \"Material1\": {\"BACKGROUND\": \"#E3F2FD\", \"TEXT\": \"#000000\", \"INPUT\": \"#86A8FF\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#86A8FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#5079D3\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 0, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#FF0266\", \"ACCENT2\": \"#FF5C93\", \"ACCENT3\": \"#C5003C\", },\n \"Material2\": {\"BACKGROUND\": \"#FAFAFA\", \"TEXT\": \"#000000\", \"INPUT\": \"#004EA1\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#5EA7FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#0079D3\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 0, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#FF0266\", \"ACCENT2\": \"#FF5C93\", \"ACCENT3\": \"#C5003C\", },\n \"Reddit\": {\"BACKGROUND\": \"#ffffff\", \"TEXT\": \"#1a1a1b\", \"INPUT\": \"#dae0e6\", \"TEXT_INPUT\": \"#222222\", \"SCROLL\": \"#a5a4a4\", \"BUTTON\": (\"#FFFFFF\", \"#0079d3\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, \"ACCENT1\": \"#ff5414\", \"ACCENT2\": \"#33a8ff\",\n \"ACCENT3\": \"#dbf0ff\", },\n \"Topanga\": {\"BACKGROUND\": \"#282923\", \"TEXT\": \"#E7DB74\", \"INPUT\": \"#393a32\", \"TEXT_INPUT\": \"#E7C855\", \"SCROLL\": \"#E7C855\", \"BUTTON\": (\"#E7C855\", \"#284B5A\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, \"ACCENT1\": \"#c15226\", \"ACCENT2\": \"#7a4d5f\",\n \"ACCENT3\": \"#889743\", },\n \"GreenTan\": {\"BACKGROUND\": \"#9FB8AD\", \"TEXT\": '#000000', \"INPUT\": \"#F7F3EC\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#F7F3EC\", \"BUTTON\": (\"#FFFFFF\", \"#475841\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Dark\": {\"BACKGROUND\": \"#404040\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#4D4D4D\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#707070\", \"BUTTON\": (\"#FFFFFF\", \"#004F00\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightGreen\": {\"BACKGROUND\": \"#B7CECE\", \"TEXT\": \"#000000\", \"INPUT\": \"#FDFFF7\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#FDFFF7\",\n \"BUTTON\": (\"#FFFFFF\", \"#658268\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"ACCENT1\": \"#76506d\",\n \"ACCENT2\": \"#5148f1\", \"ACCENT3\": \"#0a1c84\", 
\"PROGRESS_DEPTH\": 0, },\n \"Dark2\": {\"BACKGROUND\": \"#404040\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#FFFFFF\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#707070\", \"BUTTON\": (\"#FFFFFF\", \"#004F00\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Black\": {\"BACKGROUND\": \"#000000\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#4D4D4D\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#707070\", \"BUTTON\": (\"#000000\", \"#FFFFFF\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Tan\": {\"BACKGROUND\": \"#fdf6e3\", \"TEXT\": \"#268bd1\", \"INPUT\": \"#eee8d5\", \"TEXT_INPUT\": \"#6c71c3\", \"SCROLL\": \"#eee8d5\", \"BUTTON\": (\"#FFFFFF\", \"#063542\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"TanBlue\": {\"BACKGROUND\": \"#e5dece\", \"TEXT\": \"#063289\", \"INPUT\": \"#f9f8f4\", \"TEXT_INPUT\": \"#242834\", \"SCROLL\": \"#eee8d5\", \"BUTTON\": (\"#FFFFFF\", \"#063289\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkTanBlue\": {\"BACKGROUND\": \"#242834\", \"TEXT\": \"#dfe6f8\", \"INPUT\": \"#97755c\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#a9afbb\",\n \"BUTTON\": (\"#FFFFFF\", \"#063289\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkAmber\": {\"BACKGROUND\": \"#2c2825\", \"TEXT\": \"#fdcb52\", \"INPUT\": \"#705e52\", \"TEXT_INPUT\": \"#fdcb52\", \"SCROLL\": \"#705e52\",\n \"BUTTON\": (\"#000000\", \"#fdcb52\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBlue\": {\"BACKGROUND\": \"#1a2835\", \"TEXT\": \"#d1ecff\", \"INPUT\": \"#335267\", \"TEXT_INPUT\": \"#acc2d0\", \"SCROLL\": \"#1b6497\", \"BUTTON\": (\"#000000\", \"#fafaf8\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Reds\": {\"BACKGROUND\": \"#280001\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#d8d584\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#763e00\", \"BUTTON\": (\"#000000\", \"#daad28\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Green\": {\"BACKGROUND\": \"#82a459\", \"TEXT\": \"#000000\", \"INPUT\": \"#d8d584\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e3ecf3\", \"BUTTON\": (\"#FFFFFF\", \"#517239\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"BluePurple\": {\"BACKGROUND\": \"#A5CADD\", \"TEXT\": \"#6E266E\", \"INPUT\": \"#E0F5FF\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#E0F5FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#303952\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Purple\": {\"BACKGROUND\": \"#B0AAC2\", \"TEXT\": \"#000000\", \"INPUT\": \"#F2EFE8\", \"SCROLL\": \"#F2EFE8\", \"TEXT_INPUT\": \"#000000\", \"BUTTON\": (\"#000000\", \"#C2D4D8\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"BlueMono\": {\"BACKGROUND\": \"#AAB6D3\", \"TEXT\": \"#000000\", \"INPUT\": \"#F1F4FC\", \"SCROLL\": \"#F1F4FC\", \"TEXT_INPUT\": \"#000000\", \"BUTTON\": (\"#FFFFFF\", \"#7186C7\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n 
\"GreenMono\": {\"BACKGROUND\": \"#A8C1B4\", \"TEXT\": \"#000000\", \"INPUT\": \"#DDE0DE\", \"SCROLL\": \"#E3E3E3\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#6D9F85\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"BrownBlue\": {\"BACKGROUND\": \"#64778d\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#f0f3f7\", \"SCROLL\": \"#A6B2BE\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#283b5b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"BrightColors\": {\"BACKGROUND\": \"#b4ffb4\", \"TEXT\": \"#000000\", \"INPUT\": \"#ffff64\", \"SCROLL\": \"#ffb482\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#000000\", \"#ffa0dc\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"NeutralBlue\": {\"BACKGROUND\": \"#92aa9d\", \"TEXT\": \"#000000\", \"INPUT\": \"#fcfff6\", \"SCROLL\": \"#fcfff6\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#000000\", \"#d0dbbd\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Kayak\": {\"BACKGROUND\": \"#a7ad7f\", \"TEXT\": \"#000000\", \"INPUT\": \"#e6d3a8\", \"SCROLL\": \"#e6d3a8\", \"TEXT_INPUT\": \"#000000\", \"BUTTON\": (\"#FFFFFF\", \"#5d907d\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"SandyBeach\": {\"BACKGROUND\": \"#efeccb\", \"TEXT\": \"#012f2f\", \"INPUT\": \"#e6d3a8\", \"SCROLL\": \"#e6d3a8\", \"TEXT_INPUT\": \"#012f2f\",\n \"BUTTON\": (\"#FFFFFF\", \"#046380\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"TealMono\": {\"BACKGROUND\": \"#a8cfdd\", \"TEXT\": \"#000000\", \"INPUT\": \"#dfedf2\", \"SCROLL\": \"#dfedf2\", \"TEXT_INPUT\": \"#000000\", \"BUTTON\": (\"#FFFFFF\", \"#183440\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Default\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT, \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT,\n \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1,\n \"PROGRESS_DEPTH\": 0, },\n \"Default1\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT, \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT,\n \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": COLOR_SYSTEM_DEFAULT, \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1,\n \"PROGRESS_DEPTH\": 0, },\n \"DefaultNoMoreNagging\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT,\n \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT, \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR,\n \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1, \"PROGRESS_DEPTH\": 0, },\n \"GrayGrayGray\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT, \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT,\n \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": COLOR_SYSTEM_DEFAULT, \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1,\n \"PROGRESS_DEPTH\": 0, },\n \"LightBlue\": {\"BACKGROUND\": \"#E3F2FD\", \"TEXT\": \"#000000\", \"INPUT\": \"#86A8FF\", \"TEXT_INPUT\": \"#000000\", 
\"SCROLL\": \"#86A8FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#5079D3\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 0, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#FF0266\", \"ACCENT2\": \"#FF5C93\", \"ACCENT3\": \"#C5003C\", },\n \"LightGrey\": {\"BACKGROUND\": \"#FAFAFA\", \"TEXT\": \"#000000\", \"INPUT\": \"#004EA1\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#5EA7FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#0079D3\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 0, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#FF0266\", \"ACCENT2\": \"#FF5C93\", \"ACCENT3\": \"#C5003C\", },\n \"LightGrey1\": {\"BACKGROUND\": \"#ffffff\", \"TEXT\": \"#1a1a1b\", \"INPUT\": \"#dae0e6\", \"TEXT_INPUT\": \"#222222\", \"SCROLL\": \"#a5a4a4\",\n \"BUTTON\": (\"#FFFFFF\", \"#0079d3\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#ff5414\", \"ACCENT2\": \"#33a8ff\", \"ACCENT3\": \"#dbf0ff\", },\n \"DarkBrown\": {\"BACKGROUND\": \"#282923\", \"TEXT\": \"#E7DB74\", \"INPUT\": \"#393a32\", \"TEXT_INPUT\": \"#E7C855\", \"SCROLL\": \"#E7C855\",\n \"BUTTON\": (\"#E7C855\", \"#284B5A\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#c15226\", \"ACCENT2\": \"#7a4d5f\", \"ACCENT3\": \"#889743\", },\n \"LightGreen1\": {\"BACKGROUND\": \"#9FB8AD\", \"TEXT\": \"#000000\", \"INPUT\": \"#F7F3EC\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#F7F3EC\",\n \"BUTTON\": (\"#FFFFFF\", \"#475841\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey\": {\"BACKGROUND\": \"#404040\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#4D4D4D\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#707070\", \"BUTTON\": (\"#FFFFFF\", \"#004F00\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightGreen2\": {\"BACKGROUND\": \"#B7CECE\", \"TEXT\": \"#000000\", \"INPUT\": \"#FDFFF7\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#FDFFF7\",\n \"BUTTON\": (\"#FFFFFF\", \"#658268\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"ACCENT1\": \"#76506d\",\n \"ACCENT2\": \"#5148f1\", \"ACCENT3\": \"#0a1c84\", \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey1\": {\"BACKGROUND\": \"#404040\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#FFFFFF\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#707070\",\n \"BUTTON\": (\"#FFFFFF\", \"#004F00\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBlack\": {\"BACKGROUND\": \"#000000\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#4D4D4D\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#707070\",\n \"BUTTON\": (\"#000000\", \"#FFFFFF\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBrown\": {\"BACKGROUND\": \"#fdf6e3\", \"TEXT\": \"#268bd1\", \"INPUT\": \"#eee8d5\", \"TEXT_INPUT\": \"#6c71c3\", \"SCROLL\": \"#eee8d5\",\n \"BUTTON\": (\"#FFFFFF\", \"#063542\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBrown1\": {\"BACKGROUND\": \"#e5dece\", \"TEXT\": \"#063289\", \"INPUT\": \"#f9f8f4\", \"TEXT_INPUT\": \"#242834\", \"SCROLL\": \"#eee8d5\",\n \"BUTTON\": (\"#FFFFFF\", \"#063289\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n 
\"DarkBlue1\": {\"BACKGROUND\": \"#242834\", \"TEXT\": \"#dfe6f8\", \"INPUT\": \"#97755c\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#a9afbb\",\n \"BUTTON\": (\"#FFFFFF\", \"#063289\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBrown1\": {\"BACKGROUND\": \"#2c2825\", \"TEXT\": \"#fdcb52\", \"INPUT\": \"#705e52\", \"TEXT_INPUT\": \"#fdcb52\", \"SCROLL\": \"#705e52\",\n \"BUTTON\": (\"#000000\", \"#fdcb52\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBlue2\": {\"BACKGROUND\": \"#1a2835\", \"TEXT\": \"#d1ecff\", \"INPUT\": \"#335267\", \"TEXT_INPUT\": \"#acc2d0\", \"SCROLL\": \"#1b6497\",\n \"BUTTON\": (\"#000000\", \"#fafaf8\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBrown2\": {\"BACKGROUND\": \"#280001\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#d8d584\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#763e00\",\n \"BUTTON\": (\"#000000\", \"#daad28\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGreen\": {\"BACKGROUND\": \"#82a459\", \"TEXT\": \"#000000\", \"INPUT\": \"#d8d584\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e3ecf3\",\n \"BUTTON\": (\"#FFFFFF\", \"#517239\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBlue1\": {\"BACKGROUND\": \"#A5CADD\", \"TEXT\": \"#6E266E\", \"INPUT\": \"#E0F5FF\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#E0F5FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#303952\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightPurple\": {\"BACKGROUND\": \"#B0AAC2\", \"TEXT\": \"#000000\", \"INPUT\": \"#F2EFE8\", \"SCROLL\": \"#F2EFE8\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#000000\", \"#C2D4D8\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBlue2\": {\"BACKGROUND\": \"#AAB6D3\", \"TEXT\": \"#000000\", \"INPUT\": \"#F1F4FC\", \"SCROLL\": \"#F1F4FC\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#7186C7\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightGreen3\": {\"BACKGROUND\": \"#A8C1B4\", \"TEXT\": \"#000000\", \"INPUT\": \"#DDE0DE\", \"SCROLL\": \"#E3E3E3\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#6D9F85\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBlue3\": {\"BACKGROUND\": \"#64778d\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#f0f3f7\", \"SCROLL\": \"#A6B2BE\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#283b5b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightGreen4\": {\"BACKGROUND\": \"#b4ffb4\", \"TEXT\": \"#000000\", \"INPUT\": \"#ffff64\", \"SCROLL\": \"#ffb482\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#000000\", \"#ffa0dc\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightGreen5\": {\"BACKGROUND\": \"#92aa9d\", \"TEXT\": \"#000000\", \"INPUT\": \"#fcfff6\", \"SCROLL\": \"#fcfff6\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#000000\", \"#d0dbbd\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, 
\"PROGRESS_DEPTH\": 0, },\n \"LightBrown2\": {\"BACKGROUND\": \"#a7ad7f\", \"TEXT\": \"#000000\", \"INPUT\": \"#e6d3a8\", \"SCROLL\": \"#e6d3a8\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#5d907d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBrown3\": {\"BACKGROUND\": \"#efeccb\", \"TEXT\": \"#012f2f\", \"INPUT\": \"#e6d3a8\", \"SCROLL\": \"#e6d3a8\", \"TEXT_INPUT\": \"#012f2f\",\n \"BUTTON\": (\"#FFFFFF\", \"#046380\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBlue3\": {\"BACKGROUND\": \"#a8cfdd\", \"TEXT\": \"#000000\", \"INPUT\": \"#dfedf2\", \"SCROLL\": \"#dfedf2\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#183440\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBrown4\": {\"BACKGROUND\": \"#d7c79e\", \"TEXT\": \"#a35638\", \"INPUT\": \"#9dab86\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#a35638\",\n \"BUTTON\": (\"#FFFFFF\", \"#a35638\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#a35638\", \"#9dab86\", \"#e08f62\", \"#d7c79e\"], },\n \"DarkTeal\": {\"BACKGROUND\": \"#003f5c\", \"TEXT\": \"#fb5b5a\", \"INPUT\": \"#bc4873\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#bc4873\", \"BUTTON\": (\"#FFFFFF\", \"#fb5b5a\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#003f5c\", \"#472b62\", \"#bc4873\", \"#fb5b5a\"], },\n \"DarkPurple\": {\"BACKGROUND\": \"#472b62\", \"TEXT\": \"#fb5b5a\", \"INPUT\": \"#bc4873\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#bc4873\",\n \"BUTTON\": (\"#FFFFFF\", \"#472b62\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#003f5c\", \"#472b62\", \"#bc4873\", \"#fb5b5a\"], },\n \"LightGreen6\": {\"BACKGROUND\": \"#eafbea\", \"TEXT\": \"#1f6650\", \"INPUT\": \"#6f9a8d\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#1f6650\",\n \"BUTTON\": (\"#FFFFFF\", \"#1f6650\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#1f6650\", \"#6f9a8d\", \"#ea5e5e\", \"#eafbea\"], },\n \"DarkGrey2\": {\"BACKGROUND\": \"#2b2b28\", \"TEXT\": \"#f8f8f8\", \"INPUT\": \"#f1d6ab\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#f1d6ab\",\n \"BUTTON\": (\"#2b2b28\", \"#e3b04b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#2b2b28\", \"#e3b04b\", \"#f1d6ab\", \"#f8f8f8\"], },\n \"LightBrown6\": {\"BACKGROUND\": \"#f9b282\", \"TEXT\": \"#8f4426\", \"INPUT\": \"#de6b35\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#8f4426\",\n \"BUTTON\": (\"#FFFFFF\", \"#8f4426\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#8f4426\", \"#de6b35\", \"#64ccda\", \"#f9b282\"], },\n \"DarkTeal1\": {\"BACKGROUND\": \"#396362\", \"TEXT\": \"#ffe7d1\", \"INPUT\": \"#f6c89f\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#f6c89f\",\n \"BUTTON\": (\"#ffe7d1\", \"#4b8e8d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#396362\", \"#4b8e8d\", \"#f6c89f\", \"#ffe7d1\"], },\n \"LightBrown7\": {\"BACKGROUND\": \"#f6c89f\", 
\"TEXT\": \"#396362\", \"INPUT\": \"#4b8e8d\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#396362\",\n \"BUTTON\": (\"#FFFFFF\", \"#396362\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#396362\", \"#4b8e8d\", \"#f6c89f\", \"#ffe7d1\"], },\n \"DarkPurple1\": {\"BACKGROUND\": \"#0c093c\", \"TEXT\": \"#fad6d6\", \"INPUT\": \"#eea5f6\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#eea5f6\",\n \"BUTTON\": (\"#FFFFFF\", \"#df42d1\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#0c093c\", \"#df42d1\", \"#eea5f6\", \"#fad6d6\"], },\n \"DarkGrey3\": {\"BACKGROUND\": \"#211717\", \"TEXT\": \"#dfddc7\", \"INPUT\": \"#f58b54\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#f58b54\",\n \"BUTTON\": (\"#dfddc7\", \"#a34a28\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#211717\", \"#a34a28\", \"#f58b54\", \"#dfddc7\"], },\n \"LightBrown8\": {\"BACKGROUND\": \"#dfddc7\", \"TEXT\": \"#211717\", \"INPUT\": \"#a34a28\", \"TEXT_INPUT\": \"#dfddc7\", \"SCROLL\": \"#211717\",\n \"BUTTON\": (\"#dfddc7\", \"#a34a28\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#211717\", \"#a34a28\", \"#f58b54\", \"#dfddc7\"], },\n \"DarkBlue4\": {\"BACKGROUND\": \"#494ca2\", \"TEXT\": \"#e3e7f1\", \"INPUT\": \"#c6cbef\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#c6cbef\",\n \"BUTTON\": (\"#FFFFFF\", \"#8186d5\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#494ca2\", \"#8186d5\", \"#c6cbef\", \"#e3e7f1\"], },\n \"LightBlue4\": {\"BACKGROUND\": \"#5c94bd\", \"TEXT\": \"#470938\", \"INPUT\": \"#1a3e59\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#470938\",\n \"BUTTON\": (\"#FFFFFF\", \"#470938\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#470938\", \"#1a3e59\", \"#5c94bd\", \"#f2d6eb\"], },\n \"DarkTeal2\": {\"BACKGROUND\": \"#394a6d\", \"TEXT\": \"#c0ffb3\", \"INPUT\": \"#52de97\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#52de97\",\n \"BUTTON\": (\"#c0ffb3\", \"#394a6d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#394a6d\", \"#3c9d9b\", \"#52de97\", \"#c0ffb3\"], },\n \"DarkTeal3\": {\"BACKGROUND\": \"#3c9d9b\", \"TEXT\": \"#c0ffb3\", \"INPUT\": \"#52de97\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#52de97\",\n \"BUTTON\": (\"#c0ffb3\", \"#394a6d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#394a6d\", \"#3c9d9b\", \"#52de97\", \"#c0ffb3\"], },\n \"DarkPurple5\": {\"BACKGROUND\": \"#730068\", \"TEXT\": \"#f6f078\", \"INPUT\": \"#01d28e\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#01d28e\",\n \"BUTTON\": (\"#f6f078\", \"#730068\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#730068\", \"#434982\", \"#01d28e\", \"#f6f078\"], },\n \"DarkPurple2\": {\"BACKGROUND\": \"#202060\", \"TEXT\": \"#b030b0\", \"INPUT\": \"#602080\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#602080\",\n \"BUTTON\": (\"#FFFFFF\", \"#202040\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, 
\"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#202040\", \"#202060\", \"#602080\", \"#b030b0\"], },\n \"DarkBlue5\": {\"BACKGROUND\": \"#000272\", \"TEXT\": \"#ff6363\", \"INPUT\": \"#a32f80\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#a32f80\",\n \"BUTTON\": (\"#FFFFFF\", \"#341677\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#000272\", \"#341677\", \"#a32f80\", \"#ff6363\"], },\n \"LightGrey2\": {\"BACKGROUND\": \"#f6f6f6\", \"TEXT\": \"#420000\", \"INPUT\": \"#d4d7dd\", \"TEXT_INPUT\": \"#420000\", \"SCROLL\": \"#420000\",\n \"BUTTON\": (\"#420000\", \"#d4d7dd\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#420000\", \"#d4d7dd\", \"#eae9e9\", \"#f6f6f6\"], },\n \"LightGrey3\": {\"BACKGROUND\": \"#eae9e9\", \"TEXT\": \"#420000\", \"INPUT\": \"#d4d7dd\", \"TEXT_INPUT\": \"#420000\", \"SCROLL\": \"#420000\",\n \"BUTTON\": (\"#420000\", \"#d4d7dd\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#420000\", \"#d4d7dd\", \"#eae9e9\", \"#f6f6f6\"], },\n \"DarkBlue6\": {\"BACKGROUND\": \"#01024e\", \"TEXT\": \"#ff6464\", \"INPUT\": \"#8b4367\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#8b4367\",\n \"BUTTON\": (\"#FFFFFF\", \"#543864\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#01024e\", \"#543864\", \"#8b4367\", \"#ff6464\"], },\n \"DarkBlue7\": {\"BACKGROUND\": \"#241663\", \"TEXT\": \"#eae7af\", \"INPUT\": \"#a72693\", \"TEXT_INPUT\": \"#eae7af\", \"SCROLL\": \"#a72693\",\n \"BUTTON\": (\"#eae7af\", \"#160f30\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#160f30\", \"#241663\", \"#a72693\", \"#eae7af\"], },\n \"LightBrown9\": {\"BACKGROUND\": \"#f6d365\", \"TEXT\": \"#3a1f5d\", \"INPUT\": \"#c83660\", \"TEXT_INPUT\": \"#f6d365\", \"SCROLL\": \"#3a1f5d\",\n \"BUTTON\": (\"#f6d365\", \"#c83660\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3a1f5d\", \"#c83660\", \"#e15249\", \"#f6d365\"], },\n \"DarkPurple3\": {\"BACKGROUND\": \"#6e2142\", \"TEXT\": \"#ffd692\", \"INPUT\": \"#e16363\", \"TEXT_INPUT\": \"#ffd692\", \"SCROLL\": \"#e16363\",\n \"BUTTON\": (\"#ffd692\", \"#943855\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#6e2142\", \"#943855\", \"#e16363\", \"#ffd692\"], },\n \"LightBrown10\": {\"BACKGROUND\": \"#ffd692\", \"TEXT\": \"#6e2142\", \"INPUT\": \"#943855\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#6e2142\",\n \"BUTTON\": (\"#FFFFFF\", \"#6e2142\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#6e2142\", \"#943855\", \"#e16363\", \"#ffd692\"], },\n \"DarkPurple4\": {\"BACKGROUND\": \"#200f21\", \"TEXT\": \"#f638dc\", \"INPUT\": \"#5a3d5c\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#5a3d5c\",\n \"BUTTON\": (\"#FFFFFF\", \"#382039\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#200f21\", \"#382039\", \"#5a3d5c\", \"#f638dc\"], },\n \"LightBlue5\": {\"BACKGROUND\": \"#b2fcff\", \"TEXT\": \"#3e64ff\", \"INPUT\": \"#5edfff\", \"TEXT_INPUT\": \"#000000\", 
\"SCROLL\": \"#3e64ff\",\n \"BUTTON\": (\"#FFFFFF\", \"#3e64ff\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3e64ff\", \"#5edfff\", \"#b2fcff\", \"#ecfcff\"], },\n \"DarkTeal4\": {\"BACKGROUND\": \"#464159\", \"TEXT\": \"#c7f0db\", \"INPUT\": \"#8bbabb\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#8bbabb\",\n \"BUTTON\": (\"#FFFFFF\", \"#6c7b95\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#464159\", \"#6c7b95\", \"#8bbabb\", \"#c7f0db\"], },\n \"LightTeal\": {\"BACKGROUND\": \"#c7f0db\", \"TEXT\": \"#464159\", \"INPUT\": \"#6c7b95\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#464159\",\n \"BUTTON\": (\"#FFFFFF\", \"#464159\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#464159\", \"#6c7b95\", \"#8bbabb\", \"#c7f0db\"], },\n \"DarkTeal5\": {\"BACKGROUND\": \"#8bbabb\", \"TEXT\": \"#464159\", \"INPUT\": \"#6c7b95\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#464159\",\n \"BUTTON\": (\"#c7f0db\", \"#6c7b95\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#464159\", \"#6c7b95\", \"#8bbabb\", \"#c7f0db\"], },\n \"LightGrey4\": {\"BACKGROUND\": \"#faf5ef\", \"TEXT\": \"#672f2f\", \"INPUT\": \"#99b19c\", \"TEXT_INPUT\": \"#672f2f\", \"SCROLL\": \"#672f2f\",\n \"BUTTON\": (\"#672f2f\", \"#99b19c\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#672f2f\", \"#99b19c\", \"#d7d1c9\", \"#faf5ef\"], },\n \"LightGreen7\": {\"BACKGROUND\": \"#99b19c\", \"TEXT\": \"#faf5ef\", \"INPUT\": \"#d7d1c9\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#d7d1c9\",\n \"BUTTON\": (\"#FFFFFF\", \"#99b19c\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#672f2f\", \"#99b19c\", \"#d7d1c9\", \"#faf5ef\"], },\n \"LightGrey5\": {\"BACKGROUND\": \"#d7d1c9\", \"TEXT\": \"#672f2f\", \"INPUT\": \"#99b19c\", \"TEXT_INPUT\": \"#672f2f\", \"SCROLL\": \"#672f2f\",\n \"BUTTON\": (\"#FFFFFF\", \"#672f2f\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#672f2f\", \"#99b19c\", \"#d7d1c9\", \"#faf5ef\"], },\n \"DarkBrown3\": {\"BACKGROUND\": \"#a0855b\", \"TEXT\": \"#f9f6f2\", \"INPUT\": \"#f1d6ab\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#f1d6ab\",\n \"BUTTON\": (\"#FFFFFF\", \"#38470b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#38470b\", \"#a0855b\", \"#f1d6ab\", \"#f9f6f2\"], },\n \"LightBrown11\": {\"BACKGROUND\": \"#f1d6ab\", \"TEXT\": \"#38470b\", \"INPUT\": \"#a0855b\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#38470b\",\n \"BUTTON\": (\"#f9f6f2\", \"#a0855b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#38470b\", \"#a0855b\", \"#f1d6ab\", \"#f9f6f2\"], },\n \"DarkRed\": {\"BACKGROUND\": \"#83142c\", \"TEXT\": \"#f9d276\", \"INPUT\": \"#ad1d45\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#ad1d45\", \"BUTTON\": (\"#f9d276\", \"#ad1d45\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#44000d\", \"#83142c\", \"#ad1d45\", 
\"#f9d276\"], },\n \"DarkTeal6\": {\"BACKGROUND\": \"#204969\", \"TEXT\": \"#fff7f7\", \"INPUT\": \"#dadada\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#dadada\",\n \"BUTTON\": (\"#000000\", \"#fff7f7\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#204969\", \"#08ffc8\", \"#dadada\", \"#fff7f7\"], },\n \"DarkBrown4\": {\"BACKGROUND\": \"#252525\", \"TEXT\": \"#ff0000\", \"INPUT\": \"#af0404\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#af0404\",\n \"BUTTON\": (\"#FFFFFF\", \"#252525\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#252525\", \"#414141\", \"#af0404\", \"#ff0000\"], },\n \"LightYellow\": {\"BACKGROUND\": \"#f4ff61\", \"TEXT\": \"#27aa80\", \"INPUT\": \"#32ff6a\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#27aa80\",\n \"BUTTON\": (\"#f4ff61\", \"#27aa80\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#27aa80\", \"#32ff6a\", \"#a8ff3e\", \"#f4ff61\"], },\n \"DarkGreen1\": {\"BACKGROUND\": \"#2b580c\", \"TEXT\": \"#fdef96\", \"INPUT\": \"#f7b71d\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#f7b71d\",\n \"BUTTON\": (\"#fdef96\", \"#2b580c\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#2b580c\", \"#afa939\", \"#f7b71d\", \"#fdef96\"], },\n \"LightGreen8\": {\"BACKGROUND\": \"#c8dad3\", \"TEXT\": \"#63707e\", \"INPUT\": \"#93b5b3\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#63707e\",\n \"BUTTON\": (\"#FFFFFF\", \"#63707e\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#63707e\", \"#93b5b3\", \"#c8dad3\", \"#f2f6f5\"], },\n \"DarkTeal7\": {\"BACKGROUND\": \"#248ea9\", \"TEXT\": \"#fafdcb\", \"INPUT\": \"#aee7e8\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#aee7e8\",\n \"BUTTON\": (\"#000000\", \"#fafdcb\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#248ea9\", \"#28c3d4\", \"#aee7e8\", \"#fafdcb\"], },\n \"DarkBlue8\": {\"BACKGROUND\": \"#454d66\", \"TEXT\": \"#d9d872\", \"INPUT\": \"#58b368\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#58b368\",\n \"BUTTON\": (\"#000000\", \"#009975\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#009975\", \"#454d66\", \"#58b368\", \"#d9d872\"], },\n \"DarkBlue9\": {\"BACKGROUND\": \"#263859\", \"TEXT\": \"#ff6768\", \"INPUT\": \"#6b778d\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#6b778d\",\n \"BUTTON\": (\"#ff6768\", \"#263859\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#17223b\", \"#263859\", \"#6b778d\", \"#ff6768\"], },\n \"DarkBlue10\": {\"BACKGROUND\": \"#0028ff\", \"TEXT\": \"#f1f4df\", \"INPUT\": \"#10eaf0\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#10eaf0\",\n \"BUTTON\": (\"#f1f4df\", \"#24009c\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#24009c\", \"#0028ff\", \"#10eaf0\", \"#f1f4df\"], },\n \"DarkBlue11\": {\"BACKGROUND\": \"#6384b3\", \"TEXT\": \"#e6f0b6\", \"INPUT\": \"#b8e9c0\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#b8e9c0\",\n \"BUTTON\": (\"#e6f0b6\", \"#684949\"), \"PROGRESS\": 
DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#684949\", \"#6384b3\", \"#b8e9c0\", \"#e6f0b6\"], },\n \"DarkTeal8\": {\"BACKGROUND\": \"#71a0a5\", \"TEXT\": \"#212121\", \"INPUT\": \"#665c84\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#212121\",\n \"BUTTON\": (\"#fab95b\", \"#665c84\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#212121\", \"#665c84\", \"#71a0a5\", \"#fab95b\"], },\n \"DarkRed1\": {\"BACKGROUND\": \"#c10000\", \"TEXT\": \"#eeeeee\", \"INPUT\": \"#dedede\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#dedede\", \"BUTTON\": (\"#c10000\", \"#eeeeee\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#c10000\", \"#ff4949\", \"#dedede\", \"#eeeeee\"], },\n \"LightBrown5\": {\"BACKGROUND\": \"#fff591\", \"TEXT\": \"#e41749\", \"INPUT\": \"#f5587b\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e41749\",\n \"BUTTON\": (\"#fff591\", \"#e41749\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#e41749\", \"#f5587b\", \"#ff8a5c\", \"#fff591\"], },\n \"LightGreen9\": {\"BACKGROUND\": \"#f1edb3\", \"TEXT\": \"#3b503d\", \"INPUT\": \"#4a746e\", \"TEXT_INPUT\": \"#f1edb3\", \"SCROLL\": \"#3b503d\",\n \"BUTTON\": (\"#f1edb3\", \"#3b503d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3b503d\", \"#4a746e\", \"#c8cf94\", \"#f1edb3\"], \"DESCRIPTION\": [\"Green\", \"Turquoise\", \"Yellow\"], },\n \"DarkGreen2\": {\"BACKGROUND\": \"#3b503d\", \"TEXT\": \"#f1edb3\", \"INPUT\": \"#c8cf94\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#c8cf94\",\n \"BUTTON\": (\"#f1edb3\", \"#3b503d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3b503d\", \"#4a746e\", \"#c8cf94\", \"#f1edb3\"], \"DESCRIPTION\": [\"Green\", \"Turquoise\", \"Yellow\"], },\n \"LightGray1\": {\"BACKGROUND\": \"#f2f2f2\", \"TEXT\": \"#222831\", \"INPUT\": \"#393e46\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#222831\",\n \"BUTTON\": (\"#f2f2f2\", \"#222831\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#222831\", \"#393e46\", \"#f96d00\", \"#f2f2f2\"], \"DESCRIPTION\": [\"#000000\", \"Grey\", \"Orange\", \"Grey\", \"Autumn\"], },\n \"DarkGrey4\": {\"BACKGROUND\": \"#52524e\", \"TEXT\": \"#e9e9e5\", \"INPUT\": \"#d4d6c8\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#d4d6c8\",\n \"BUTTON\": (\"#FFFFFF\", \"#9a9b94\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#52524e\", \"#9a9b94\", \"#d4d6c8\", \"#e9e9e5\"], \"DESCRIPTION\": [\"Grey\", \"Pastel\", \"Winter\"], },\n \"DarkBlue12\": {\"BACKGROUND\": \"#324e7b\", \"TEXT\": \"#f8f8f8\", \"INPUT\": \"#86a6df\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#86a6df\",\n \"BUTTON\": (\"#FFFFFF\", \"#5068a9\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#324e7b\", \"#5068a9\", \"#86a6df\", \"#f8f8f8\"], \"DESCRIPTION\": [\"Blue\", \"Grey\", \"Cold\", \"Winter\"], },\n \"DarkPurple6\": {\"BACKGROUND\": \"#070739\", \"TEXT\": \"#e1e099\", \"INPUT\": \"#c327ab\", \"TEXT_INPUT\": \"#e1e099\", \"SCROLL\": 
\"#c327ab\",\n \"BUTTON\": (\"#e1e099\", \"#521477\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#070739\", \"#521477\", \"#c327ab\", \"#e1e099\"], \"DESCRIPTION\": [\"#000000\", \"Purple\", \"Yellow\", \"Dark\"], },\n \"DarkPurple7\": {\"BACKGROUND\": \"#191930\", \"TEXT\": \"#B1B7C5\", \"INPUT\": \"#232B5C\", \"TEXT_INPUT\": \"#D0E3E7\", \"SCROLL\": \"#B1B7C5\",\n \"BUTTON\": (\"#272D38\", \"#B1B7C5\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBlue13\": {\"BACKGROUND\": \"#203562\", \"TEXT\": \"#e3e8f8\", \"INPUT\": \"#c0c5cd\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#c0c5cd\",\n \"BUTTON\": (\"#FFFFFF\", \"#3e588f\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#203562\", \"#3e588f\", \"#c0c5cd\", \"#e3e8f8\"], \"DESCRIPTION\": [\"Blue\", \"Grey\", \"Wedding\", \"Cold\"], },\n \"DarkBrown5\": {\"BACKGROUND\": \"#3c1b1f\", \"TEXT\": \"#f6e1b5\", \"INPUT\": \"#e2bf81\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e2bf81\",\n \"BUTTON\": (\"#3c1b1f\", \"#f6e1b5\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3c1b1f\", \"#b21e4b\", \"#e2bf81\", \"#f6e1b5\"], \"DESCRIPTION\": [\"Brown\", \"Red\", \"Yellow\", \"Warm\"], },\n \"DarkGreen3\": {\"BACKGROUND\": \"#062121\", \"TEXT\": \"#eeeeee\", \"INPUT\": \"#e4dcad\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e4dcad\",\n \"BUTTON\": (\"#eeeeee\", \"#181810\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#062121\", \"#181810\", \"#e4dcad\", \"#eeeeee\"], \"DESCRIPTION\": [\"#000000\", \"#000000\", \"Brown\", \"Grey\"], },\n \"DarkBlack1\": {\"BACKGROUND\": \"#181810\", \"TEXT\": \"#eeeeee\", \"INPUT\": \"#e4dcad\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e4dcad\",\n \"BUTTON\": (\"#FFFFFF\", \"#062121\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#062121\", \"#181810\", \"#e4dcad\", \"#eeeeee\"], \"DESCRIPTION\": [\"#000000\", \"#000000\", \"Brown\", \"Grey\"], },\n \"DarkGrey5\": {\"BACKGROUND\": \"#343434\", \"TEXT\": \"#f3f3f3\", \"INPUT\": \"#e9dcbe\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e9dcbe\",\n \"BUTTON\": (\"#FFFFFF\", \"#8e8b82\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#343434\", \"#8e8b82\", \"#e9dcbe\", \"#f3f3f3\"], \"DESCRIPTION\": [\"Grey\", \"Brown\"], },\n \"LightBrown12\": {\"BACKGROUND\": \"#8e8b82\", \"TEXT\": \"#f3f3f3\", \"INPUT\": \"#e9dcbe\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e9dcbe\",\n \"BUTTON\": (\"#f3f3f3\", \"#8e8b82\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#343434\", \"#8e8b82\", \"#e9dcbe\", \"#f3f3f3\"], \"DESCRIPTION\": [\"Grey\", \"Brown\"], },\n \"DarkTeal9\": {\"BACKGROUND\": \"#13445a\", \"TEXT\": \"#fef4e8\", \"INPUT\": \"#446878\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#446878\",\n \"BUTTON\": (\"#fef4e8\", \"#446878\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#13445a\", \"#970747\", \"#446878\", \"#fef4e8\"], \"DESCRIPTION\": [\"Red\", \"Grey\", 
\"Blue\", \"Wedding\", \"Retro\"], },\n \"DarkBlue14\": {\"BACKGROUND\": \"#21273d\", \"TEXT\": \"#f1f6f8\", \"INPUT\": \"#b9d4f1\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#b9d4f1\",\n \"BUTTON\": (\"#FFFFFF\", \"#6a759b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#21273d\", \"#6a759b\", \"#b9d4f1\", \"#f1f6f8\"], \"DESCRIPTION\": [\"Blue\", \"#000000\", \"Grey\", \"Cold\", \"Winter\"], },\n \"LightBlue6\": {\"BACKGROUND\": \"#f1f6f8\", \"TEXT\": \"#21273d\", \"INPUT\": \"#6a759b\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#21273d\",\n \"BUTTON\": (\"#f1f6f8\", \"#6a759b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#21273d\", \"#6a759b\", \"#b9d4f1\", \"#f1f6f8\"], \"DESCRIPTION\": [\"Blue\", \"#000000\", \"Grey\", \"Cold\", \"Winter\"], },\n \"DarkGreen4\": {\"BACKGROUND\": \"#044343\", \"TEXT\": \"#e4e4e4\", \"INPUT\": \"#045757\", \"TEXT_INPUT\": \"#e4e4e4\", \"SCROLL\": \"#045757\",\n \"BUTTON\": (\"#e4e4e4\", \"#045757\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#222222\", \"#044343\", \"#045757\", \"#e4e4e4\"], \"DESCRIPTION\": [\"#000000\", \"Turquoise\", \"Grey\", \"Dark\"], },\n \"DarkGreen5\": {\"BACKGROUND\": \"#1b4b36\", \"TEXT\": \"#e0e7f1\", \"INPUT\": \"#aebd77\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#aebd77\",\n \"BUTTON\": (\"#FFFFFF\", \"#538f6a\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#1b4b36\", \"#538f6a\", \"#aebd77\", \"#e0e7f1\"], \"DESCRIPTION\": [\"Green\", \"Grey\"], },\n \"DarkTeal10\": {\"BACKGROUND\": \"#0d3446\", \"TEXT\": \"#d8dfe2\", \"INPUT\": \"#71adb5\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#71adb5\",\n \"BUTTON\": (\"#FFFFFF\", \"#176d81\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#0d3446\", \"#176d81\", \"#71adb5\", \"#d8dfe2\"], \"DESCRIPTION\": [\"Grey\", \"Turquoise\", \"Winter\", \"Cold\"], },\n \"DarkGrey6\": {\"BACKGROUND\": \"#3e3e3e\", \"TEXT\": \"#ededed\", \"INPUT\": \"#68868c\", \"TEXT_INPUT\": \"#ededed\", \"SCROLL\": \"#68868c\",\n \"BUTTON\": (\"#FFFFFF\", \"#405559\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3e3e3e\", \"#405559\", \"#68868c\", \"#ededed\"], \"DESCRIPTION\": [\"Grey\", \"Turquoise\", \"Winter\"], },\n \"DarkTeal11\": {\"BACKGROUND\": \"#405559\", \"TEXT\": \"#ededed\", \"INPUT\": \"#68868c\", \"TEXT_INPUT\": \"#ededed\", \"SCROLL\": \"#68868c\",\n \"BUTTON\": (\"#ededed\", \"#68868c\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3e3e3e\", \"#405559\", \"#68868c\", \"#ededed\"], \"DESCRIPTION\": [\"Grey\", \"Turquoise\", \"Winter\"], },\n \"LightBlue7\": {\"BACKGROUND\": \"#9ed0e0\", \"TEXT\": \"#19483f\", \"INPUT\": \"#5c868e\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#19483f\",\n \"BUTTON\": (\"#FFFFFF\", \"#19483f\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#19483f\", \"#5c868e\", \"#ff6a38\", \"#9ed0e0\"], \"DESCRIPTION\": [\"Orange\", \"Blue\", \"Turquoise\"], },\n \"LightGreen10\": {\"BACKGROUND\": \"#d8ebb5\", \"TEXT\": \"#205d67\", 
\"INPUT\": \"#639a67\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#205d67\",\n \"BUTTON\": (\"#d8ebb5\", \"#205d67\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#205d67\", \"#639a67\", \"#d9bf77\", \"#d8ebb5\"], \"DESCRIPTION\": [\"Blue\", \"Green\", \"Brown\", \"Vintage\"], },\n \"DarkBlue15\": {\"BACKGROUND\": \"#151680\", \"TEXT\": \"#f1fea4\", \"INPUT\": \"#375fc0\", \"TEXT_INPUT\": \"#f1fea4\", \"SCROLL\": \"#375fc0\",\n \"BUTTON\": (\"#f1fea4\", \"#1c44ac\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#151680\", \"#1c44ac\", \"#375fc0\", \"#f1fea4\"], \"DESCRIPTION\": [\"Blue\", \"Yellow\", \"Cold\"], },\n \"DarkBlue16\": {\"BACKGROUND\": \"#1c44ac\", \"TEXT\": \"#f1fea4\", \"INPUT\": \"#375fc0\", \"TEXT_INPUT\": \"#f1fea4\", \"SCROLL\": \"#375fc0\",\n \"BUTTON\": (\"#f1fea4\", \"#151680\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#151680\", \"#1c44ac\", \"#375fc0\", \"#f1fea4\"], \"DESCRIPTION\": [\"Blue\", \"Yellow\", \"Cold\"], },\n \"DarkTeal12\": {\"BACKGROUND\": \"#004a7c\", \"TEXT\": \"#fafafa\", \"INPUT\": \"#e8f1f5\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e8f1f5\",\n \"BUTTON\": (\"#fafafa\", \"#005691\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#004a7c\", \"#005691\", \"#e8f1f5\", \"#fafafa\"], \"DESCRIPTION\": [\"Grey\", \"Blue\", \"Cold\", \"Winter\"], },\n \"LightBrown13\": {\"BACKGROUND\": \"#ebf5ee\", \"TEXT\": \"#921224\", \"INPUT\": \"#bdc6b8\", \"TEXT_INPUT\": \"#921224\", \"SCROLL\": \"#921224\",\n \"BUTTON\": (\"#FFFFFF\", \"#921224\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#921224\", \"#bdc6b8\", \"#bce0da\", \"#ebf5ee\"], \"DESCRIPTION\": [\"Red\", \"Blue\", \"Grey\", \"Vintage\", \"Wedding\"], },\n \"DarkBlue17\": {\"BACKGROUND\": \"#21294c\", \"TEXT\": \"#f9f2d7\", \"INPUT\": \"#f2dea8\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#f2dea8\",\n \"BUTTON\": (\"#f9f2d7\", \"#141829\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#141829\", \"#21294c\", \"#f2dea8\", \"#f9f2d7\"], \"DESCRIPTION\": [\"#000000\", \"Blue\", \"Yellow\"], },\n \"DarkBrown6\": {\"BACKGROUND\": \"#785e4d\", \"TEXT\": \"#f2eee3\", \"INPUT\": \"#baaf92\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#baaf92\",\n \"BUTTON\": (\"#FFFFFF\", \"#785e4d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#785e4d\", \"#ff8426\", \"#baaf92\", \"#f2eee3\"], \"DESCRIPTION\": [\"Grey\", \"Brown\", \"Orange\", \"Autumn\"], },\n \"DarkGreen6\": {\"BACKGROUND\": \"#5c715e\", \"TEXT\": \"#f2f9f1\", \"INPUT\": \"#ddeedf\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#ddeedf\",\n \"BUTTON\": (\"#f2f9f1\", \"#5c715e\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#5c715e\", \"#b6cdbd\", \"#ddeedf\", \"#f2f9f1\"], \"DESCRIPTION\": [\"Grey\", \"Green\", \"Vintage\"], },\n \"DarkGreen7\": {\"BACKGROUND\": \"#0C231E\", \"TEXT\": \"#efbe1c\", \"INPUT\": \"#153C33\", \"TEXT_INPUT\": \"#efbe1c\", \"SCROLL\": \"#153C33\",\n \"BUTTON\": (\"#efbe1c\", \"#153C33\"), 
\"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey7\": {\"BACKGROUND\": \"#4b586e\", \"TEXT\": \"#dddddd\", \"INPUT\": \"#574e6d\", \"TEXT_INPUT\": \"#dddddd\", \"SCROLL\": \"#574e6d\",\n \"BUTTON\": (\"#dddddd\", \"#43405d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#43405d\", \"#4b586e\", \"#574e6d\", \"#dddddd\"], \"DESCRIPTION\": [\"Grey\", \"Winter\", \"Cold\"], },\n \"DarkRed2\": {\"BACKGROUND\": \"#ab1212\", \"TEXT\": \"#f6e4b5\", \"INPUT\": \"#cd3131\", \"TEXT_INPUT\": \"#f6e4b5\", \"SCROLL\": \"#cd3131\", \"BUTTON\": (\"#f6e4b5\", \"#ab1212\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#ab1212\", \"#1fad9f\", \"#cd3131\", \"#f6e4b5\"], \"DESCRIPTION\": [\"Turquoise\", \"Red\", \"Yellow\"], },\n \"LightGrey6\": {\"BACKGROUND\": \"#e3e3e3\", \"TEXT\": \"#233142\", \"INPUT\": \"#455d7a\", \"TEXT_INPUT\": \"#e3e3e3\", \"SCROLL\": \"#233142\",\n \"BUTTON\": (\"#e3e3e3\", \"#455d7a\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#233142\", \"#455d7a\", \"#f95959\", \"#e3e3e3\"], \"DESCRIPTION\": [\"#000000\", \"Blue\", \"Red\", \"Grey\"], },\n \"HotDogStand\": {\"BACKGROUND\": \"red\", \"TEXT\": \"yellow\", \"INPUT\": \"yellow\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"yellow\", \"BUTTON\": (\"red\", \"yellow\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey8\": {\"BACKGROUND\": \"#19232D\", \"TEXT\": \"#ffffff\", \"INPUT\": \"#32414B\", \"TEXT_INPUT\": \"#ffffff\", \"SCROLL\": \"#505F69\",\n \"BUTTON\": (\"#ffffff\", \"#32414B\"), \"PROGRESS\": (\"#505F69\", \"#32414B\"), \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey9\": {\"BACKGROUND\": \"#36393F\", \"TEXT\": \"#DCDDDE\", \"INPUT\": \"#40444B\", \"TEXT_INPUT\": \"#ffffff\", \"SCROLL\": \"#202225\",\n \"BUTTON\": (\"#202225\", \"#B9BBBE\"), \"PROGRESS\": (\"#202225\", \"#40444B\"), \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey10\": {\"BACKGROUND\": \"#1c1e23\", \"TEXT\": \"#cccdcf\", \"INPUT\": \"#272a31\", \"TEXT_INPUT\": \"#8b9fde\", \"SCROLL\": \"#313641\",\n \"BUTTON\": (\"#f5f5f6\", \"#2e3d5a\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey11\": {\"BACKGROUND\": \"#1c1e23\", \"TEXT\": \"#cccdcf\", \"INPUT\": \"#313641\", \"TEXT_INPUT\": \"#cccdcf\", \"SCROLL\": \"#313641\",\n \"BUTTON\": (\"#f5f5f6\", \"#313641\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey12\": {\"BACKGROUND\": \"#1c1e23\", \"TEXT\": \"#8b9fde\", \"INPUT\": \"#313641\", \"TEXT_INPUT\": \"#8b9fde\", \"SCROLL\": \"#313641\",\n \"BUTTON\": (\"#cccdcf\", \"#2e3d5a\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey13\": {\"BACKGROUND\": \"#1c1e23\", \"TEXT\": \"#cccdcf\", \"INPUT\": \"#272a31\", \"TEXT_INPUT\": \"#cccdcf\", \"SCROLL\": \"#313641\",\n \"BUTTON\": (\"#8b9fde\", \"#313641\"), \"PROGRESS\": (\"#cccdcf\", \"#272a31\"), \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey14\": {\"BACKGROUND\": \"#24292e\", \"TEXT\": \"#fafbfc\", \"INPUT\": \"#1d2125\", \"TEXT_INPUT\": 
\"#fafbfc\", \"SCROLL\": \"#1d2125\",\n \"BUTTON\": (\"#fafbfc\", \"#155398\"), \"PROGRESS\": (\"#155398\", \"#1d2125\"), \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBrown7\": {\"BACKGROUND\": \"#2c2417\", \"TEXT\": \"#baa379\", \"INPUT\": \"#baa379\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#392e1c\",\n \"BUTTON\": (\"#000000\", \"#baa379\"), \"PROGRESS\": (\"#baa379\", \"#453923\"), \"BORDER\": 1, \"SLIDER_DEPTH\": 1, \"PROGRESS_DEPTH\": 0, },\n \"Python\": {\"BACKGROUND\": \"#3d7aab\", \"TEXT\": \"#ffde56\", \"INPUT\": \"#295273\", \"TEXT_INPUT\": \"#ffde56\", \"SCROLL\": \"#295273\", \"BUTTON\": (\"#ffde56\", \"#295273\"),\n \"PROGRESS\": (\"#ffde56\", \"#295273\"), \"BORDER\": 1, \"SLIDER_DEPTH\": 1, \"PROGRESS_DEPTH\": 0, },\n}\n\n", + "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 10839, + "n_words": 4824, + "vocab_size": 1112, + "complexity": 1, + "nloc": 14, + "token_counts": 255, + "n_ast_nodes": 19192, + "n_identifiers": 131, + "d_id": 53473, + "documentation": { + "docstring": "\n :param icon: Can be either a filename or Base64 value. For Windows if filename, it MUST be ICO format. For Linux, must NOT be ICO. Most portable is to use a Base64 of a PNG file. This works universally across all OS's\n :type icon: bytes | str\n :param button_color: Color of the button (text, background)\n :type button_color: (str, str) or str\n :param element_size: element size (width, height) in characters\n :type element_size: (int, int)\n :param button_element_size: Size of button\n :type button_element_size: (int, int)\n :param margins: (left/right, top/bottom) tkinter margins around outsize. Amount of pixels to leave inside the window's frame around the edges before your elements are shown.\n :type margins: (int, int)\n :param element_padding: Default amount of padding to put around elements in window (left/right, top/bottom) or ((left, right), (top, bottom))\n :type element_padding: (int, int) or ((int, int),(int,int))\n :param auto_size_text: True if the Widget should be shrunk to exactly fit the number of chars to show\n :type auto_size_text: bool\n :param auto_size_buttons: True if Buttons in this Window should be sized to exactly fit the text on this.\n :type auto_size_buttons: (bool)\n :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike\n :type font: (str or (str, int[, str]) or None)\n :param border_width: width of border around element\n :type border_width: (int)\n :param slider_border_width: Width of the border around sliders\n :type slider_border_width: (int)\n :param slider_relief: Type of relief to use for sliders\n :type slider_relief: (str)\n :param slider_orientation: ???\n :type slider_orientation: ???\n :param autoclose_time: ???\n :type autoclose_time: ???\n :param message_box_line_width: ???\n :type message_box_line_width: ???\n :param progress_meter_border_depth: ???\n :type progress_meter_border_depth: ???\n :param progress_meter_style: You can no longer set a progress bar style. 
All ttk styles must be the same for the window\n :type progress_meter_style: ???\n :param progress_meter_relief:\n :type progress_meter_relief: ???\n :param progress_meter_color: ???\n :type progress_meter_color: ???\n :param progress_meter_size: ???\n :type progress_meter_size: ???\n :param text_justification: Default text justification for all Text Elements in window\n :type text_justification: 'left' | 'right' | 'center'\n :param background_color: color of background\n :type background_color: (str)\n :param element_background_color: element background color\n :type element_background_color: (str)\n :param text_element_background_color: text element background color\n :type text_element_background_color: (str)\n :param input_elements_background_color: Default color to use for the background of input elements\n :type input_elements_background_color: (str)\n :param input_text_color: Default color to use for the text for Input elements\n :type input_text_color: (str)\n :param scrollbar_color: Default color to use for the slider trough\n :type scrollbar_color: (str)\n :param text_color: color of the text\n :type text_color: (str)\n :param element_text_color: Default color to use for Text elements\n :type element_text_color: (str)\n :param debug_win_size: window size\n :type debug_win_size: (int, int)\n :param window_location: Default location to place windows. Not setting will center windows on the display\n :type window_location: (int, int) | None\n :param error_button_color: (Default = (None))\n :type error_button_color: ???\n :param tooltip_time: time in milliseconds to wait before showing a tooltip. Default is 400ms\n :type tooltip_time: (int)\n :param tooltip_font: font to use for all tooltips\n :type tooltip_font: str or Tuple[str, int] or Tuple[str, int, str]\n :param use_ttk_buttons: if True will cause all buttons to be ttk buttons\n :type use_ttk_buttons: (bool)\n :param ttk_theme: Theme to use with ttk widgets. 
Choices (on Windows) include - 'default', 'winnative', 'clam', 'alt', 'classic', 'vista', 'xpnative'\n :type ttk_theme: (str)\n :param suppress_error_popups: If True then error popups will not be shown if generated internally to PySimpleGUI\n :type suppress_error_popups: (bool)\n :param suppress_raise_key_errors: If True then key errors won't be raised (you'll still get popup error)\n :type suppress_raise_key_errors: (bool)\n :param suppress_key_guessing: If True then key errors won't try and find closest matches for you\n :type suppress_key_guessing: (bool)\n :param warn_button_key_duplicates: If True then duplicate Button Keys generate warnings (not recommended as they're expected)\n :type warn_button_key_duplicates: (bool) \n :param enable_treeview_869_patch: If True, then will use the treeview color patch for tk 8.6.9\n :type enable_treeview_869_patch: (bool)\n :param enable_mac_notitlebar_patch: If True then Windows with no titlebar use an alternative technique when tkinter version < 8.6.10\n :type enable_mac_notitlebar_patch: (bool)\n :param use_custom_titlebar: If True then a custom titlebar is used instead of the normal system titlebar\n :type use_custom_titlebar: (bool)\n :param titlebar_background_color: If custom titlebar indicated by use_custom_titlebar, then use this as background color\n :type titlebar_background_color: str | None\n :param titlebar_text_color: If custom titlebar indicated by use_custom_titlebar, then use this as text color\n :type titlebar_text_color: str | None\n :param titlebar_font: If custom titlebar indicated by use_custom_titlebar, then use this as title font\n :type titlebar_font: (str or (str, int[, str]) or None) | None\n :param titlebar_icon: If custom titlebar indicated by use_custom_titlebar, then use this as the icon (file or base64 bytes)\n :type titlebar_icon: bytes | str\n :param user_settings_path: default path for user_settings API calls. Expanded with os.path.expanduser so can contain ~ to represent user\n :type user_settings_path: (str)\n :param pysimplegui_settings_path: default path for the global PySimpleGUI user_settings\n :type pysimplegui_settings_path: (str)\n :param pysimplegui_settings_filename: default filename for the global PySimpleGUI user_settings\n :type pysimplegui_settings_filename: (str)\n :param keep_on_top: If True then all windows will automatically be set to keep_on_top=True\n :type keep_on_top: (bool)\n :param dpi_awareness: If True then will turn on DPI awareness (Windows only at the moment)\n :type dpi_awareness: (bool)\n :param scaling: Sets the default scaling for all windows including popups, etc.\n :type scaling: (float)\n :param disable_modal_windows: If True then all windows, including popups, will not be modal windows\n :type disable_modal_windows: (bool)\n :param tooltip_offset: Offset to use for tooltips as a tuple. 
These values will be added to the mouse location when the widget was entered.\n :type tooltip_offset: ((None, None) | (int, int))\n :return: None\n :rtype: None\n ", + "n_words": 889, + "vocab_size": 356, + "n_whitespaces": 2847, + "language": "en" + } + }, + { + "id": 296961, + "commit_id": "46500beefcccd8106718a8172a5078bbe5579765", + "repo": "core", + "path": "tests/components/analytics/test_analytics.py", + "file_name": "test_analytics.py", + "fun_name": "test_load_with_supervisor_without_diagnostics", + "commit_message": "Enable strict typing of analytics (#83119)", + "code": "async def test_load_with_supervisor_without_diagnostics(hass):\n \n analytics = Analytics(hass)\n analytics._data.preferences[ATTR_DIAGNOSTICS] = True\n\n assert analytics.preferences[ATTR_DIAGNOSTICS]\n\n with patch(\n \"homeassistant.components.hassio.get_supervisor_info\",\n side_effect=Mock(return_value={\"diagnostics\": False}),\n ), patch(\n \"homeassistant.components.hassio.is_hassio\",\n side_effect=Mock(return_value=True),\n ):\n await analytics.load()\n\n assert not analytics.preferences[ATTR_DIAGNOSTICS]\n\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 85, + "n_words": 26, + "vocab_size": 22, + "complexity": 1, + "nloc": 13, + "token_counts": 78, + "n_ast_nodes": 132, + "n_identifiers": 12, + "d_id": 95933, + "documentation": { + "docstring": "Test loading with a supervisor that has not diagnostics enabled.", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 93204, + "commit_id": "db35e231ceababe8c9f5ca7b5d2ca685f07c7d5b", + "repo": "sentry", + "path": "tests/sentry/integrations/msteams/test_message_builder.py", + "file_name": "test_message_builder.py", + "fun_name": "test_issue_message_builder", + "commit_message": "test(msteams): Add tests for building group card (#36834)\n\nAdd tests for build_group_card which builds issues cards. Does NOT test all visual aspects of the card. 
Only ensures that certain important elements are present and the basic structure of the card is correct.", + "code": "def test_issue_message_builder(self):\n self.event1.data[\"metadata\"].update({\"value\": \"some error\"})\n self.group1.data[\"metadata\"].update({\"value\": \"some error\"})\n self.event1.data[\"type\"] = self.group1.data[\"type\"] = \"error\"\n\n issue_card = build_group_card(\n group=self.group1, event=self.event1, rules=self.rules, integration=self.integration\n )\n\n body = issue_card[\"body\"]\n assert 4 == len(body)\n\n title = body[0]\n assert \"oh no\" in title[\"text\"]\n assert TextSize.LARGE == title[\"size\"]\n assert TextWeight.BOLDER == title[\"weight\"]\n\n description = body[1]\n assert \"some error\" == description[\"text\"]\n assert TextWeight.BOLDER == description[\"weight\"]\n\n footer = body[2]\n assert \"ColumnSet\" == footer[\"type\"]\n assert 3 == len(footer[\"columns\"])\n\n logo = footer[\"columns\"][0][\"items\"][0]\n assert \"20px\" == logo[\"height\"]\n\n issue_id_and_rule = footer[\"columns\"][1][\"items\"][0]\n assert self.group1.qualified_short_id in issue_id_and_rule[\"text\"]\n assert \"rule1\" in issue_id_and_rule[\"text\"]\n assert \"+1 other\" in issue_id_and_rule[\"text\"]\n\n date = footer[\"columns\"][2][\"items\"][0]\n assert (\n re.match(\n r,\n date[\"text\"],\n re.VERBOSE,\n )\n is not None\n )\n\n actions_container = body[3]\n assert \"Container\" == actions_container[\"type\"]\n\n action_set = actions_container[\"items\"][0]\n assert \"ActionSet\" == action_set[\"type\"]\n\n actions = action_set[\"actions\"]\n for action in actions:\n assert ActionType.SHOW_CARD == action[\"type\"]\n card_body = action[\"card\"][\"body\"]\n assert 1 <= len(card_body)\n assert \"Input.ChoiceSet\" == card_body[-1][\"type\"]\n\n resolve_action, ignore_action, assign_action = actions\n assert \"Resolve\" == resolve_action[\"title\"]\n assert \"Ignore\" == ignore_action[\"title\"]\n assert \"Assign\" == assign_action[\"title\"]\n\n # Check if card is serializable to json\n card_json = json.dumps(issue_card)\n assert card_json[0] == \"{\" and card_json[-1] == \"}\"\n", + "url": "https://github.com/getsentry/sentry.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 581, + "n_words": 176, + "vocab_size": 110, + "complexity": 3, + "nloc": 60, + "token_counts": 402, + "n_ast_nodes": 694, + "n_identifiers": 41, + "d_id": 18974, + "documentation": { + "docstring": "\\{\\{ # {{\n DATE\\( # DATE(\n [0-9T+:\\-]+,\\ SHORT # 2022-07-14T19:30:34, SHORT\n \\) # )\n \\}\\} # }}\n \\ # whitespace\n at # at\n \\ # whitespace\n \\{\\{ # {{\n TIME\\([0-9T+:\\-]+\\) # TIME(2022-07-14T19:30:34)\n \\}\\} # }}", + "n_words": 35, + "vocab_size": 17, + "n_whitespaces": 369, + "language": "en" + } + }, + { + "id": 206296, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/template/loaders/cached.py", + "file_name": "cached.py", + "fun_name": "cache_key", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def cache_key(self, template_name, skip=None):\n \n skip_prefix = \"\"\n\n if skip:\n matching = [\n origin.name for origin in skip if origin.template_name == template_name\n ]\n if matching:\n skip_prefix = self.generate_hash(matching)\n\n return \"-\".join(s for s in (str(template_name), skip_prefix) if s)\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, 
+ "n_whitespaces": 127, + "n_words": 36, + "vocab_size": 28, + "complexity": 7, + "nloc": 9, + "token_counts": 66, + "n_ast_nodes": 106, + "n_identifiers": 12, + "d_id": 51474, + "documentation": { + "docstring": "\n Generate a cache key for the template name and skip.\n\n If skip is provided, only origins that match template_name are included\n in the cache key. This ensures each template is only parsed and cached\n once if contained in different extend chains like:\n\n x -> a -> a\n y -> a -> a\n z -> a -> a\n ", + "n_words": 57, + "vocab_size": 39, + "n_whitespaces": 126, + "language": "en" + } + }, + { + "id": 178712, + "commit_id": "77e7c06c0f9c5c0735b5a65c72abcd243d8e3640", + "repo": "Nuitka", + "path": "nuitka/PythonFlavors.py", + "file_name": "PythonFlavors.py", + "fun_name": "isNuitkaPython", + "commit_message": "Minor cleanups", + "code": "def isNuitkaPython():\n \n\n # spell-checker: ignore nuitkapython\n\n if python_version >= 0x300:\n return sys.implementation.name == \"nuitkapython\"\n else:\n return sys.subversion[0] == \"nuitkapython\"\n\n\n_is_anaconda = None\n\n", + "url": "https://github.com/Nuitka/Nuitka.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 47, + "n_words": 22, + "vocab_size": 19, + "complexity": 2, + "nloc": 5, + "token_counts": 29, + "n_ast_nodes": 59, + "n_identifiers": 7, + "d_id": 42801, + "documentation": { + "docstring": "Is this our own fork of CPython named Nuitka-Python.", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 60555, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_internal/cli/req_command.py", + "file_name": "req_command.py", + "fun_name": "handle_pip_version_check", + "commit_message": "upd; format", + "code": "def handle_pip_version_check(self, options):\n # type: (Values) -> None\n \n # Make sure the index_group options are present.\n assert hasattr(options, \"no_index\")\n\n if options.disable_pip_version_check or options.no_index:\n return\n\n # Otherwise, check if we're using the latest version of pip available.\n session = self._build_session(\n options, retries=0, timeout=min(5, options.timeout)\n )\n with session:\n pip_self_version_check(session, options)\n\n\nKEEPABLE_TEMPDIR_TYPES = [\n tempdir_kinds.BUILD_ENV,\n tempdir_kinds.EPHEM_WHEEL_CACHE,\n tempdir_kinds.REQ_BUILD,\n]\n\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 158, + "n_words": 55, + "vocab_size": 50, + "complexity": 3, + "nloc": 9, + "token_counts": 57, + "n_ast_nodes": 117, + "n_identifiers": 17, + "d_id": 12206, + "documentation": { + "docstring": "\n Do the pip version check if not disabled.\n\n This overrides the default behavior of not doing the check.\n ", + "n_words": 18, + "vocab_size": 15, + "n_whitespaces": 40, + "language": "en" + } + }, + { + "id": 203500, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/contrib/admin/templatetags/admin_modify.py", + "file_name": "admin_modify.py", + "fun_name": "submit_row", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def submit_row(context):\n \n add = context[\"add\"]\n change = context[\"change\"]\n is_popup = context[\"is_popup\"]\n save_as = context[\"save_as\"]\n show_save = context.get(\"show_save\", True)\n 
show_save_and_add_another = context.get(\"show_save_and_add_another\", True)\n show_save_and_continue = context.get(\"show_save_and_continue\", True)\n has_add_permission = context[\"has_add_permission\"]\n has_change_permission = context[\"has_change_permission\"]\n has_view_permission = context[\"has_view_permission\"]\n has_editable_inline_admin_formsets = context[\"has_editable_inline_admin_formsets\"]\n can_save = (\n (has_change_permission and change)\n or (has_add_permission and add)\n or has_editable_inline_admin_formsets\n )\n can_save_and_add_another = (\n has_add_permission\n and not is_popup\n and (not save_as or add)\n and can_save\n and show_save_and_add_another\n )\n can_save_and_continue = (\n not is_popup and can_save and has_view_permission and show_save_and_continue\n )\n can_change = has_change_permission or has_editable_inline_admin_formsets\n ctx = Context(context)\n ctx.update(\n {\n \"can_change\": can_change,\n \"show_delete_link\": (\n not is_popup\n and context[\"has_delete_permission\"]\n and change\n and context.get(\"show_delete\", True)\n ),\n \"show_save_as_new\": not is_popup\n and has_change_permission\n and change\n and save_as,\n \"show_save_and_add_another\": can_save_and_add_another,\n \"show_save_and_continue\": can_save_and_continue,\n \"show_save\": show_save and can_save,\n \"show_close\": not (show_save and can_save),\n }\n )\n return ctx\n\n\n@register.tag(name=\"submit_row\")", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "@register.tag(name=\"submit_row\")", + "n_ast_errors": 1, + "ast_levels": 15, + "n_whitespaces": 457, + "n_words": 131, + "vocab_size": 64, + "complexity": 22, + "nloc": 49, + "token_counts": 213, + "n_ast_nodes": 380, + "n_identifiers": 24, + "d_id": 50412, + "documentation": { + "docstring": "\n Display the row of buttons for delete and save.\n ", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 16, + "language": "en" + } + }, + { + "id": 259066, + "commit_id": "f14af688b7e77ecb6df9dfee93ec39b6c0334b86", + "repo": "scikit-learn", + "path": "sklearn/linear_model/tests/test_ridge.py", + "file_name": "test_ridge.py", + "fun_name": "test_ridgecv_normalize_deprecated", + "commit_message": "FIX Make Ridge*CV warn about rescaling alphas with scaling (#22585)", + "code": "def test_ridgecv_normalize_deprecated(Estimator):\n \n X = np.array([[1, -1], [1, 1]])\n y = np.array([0, 1])\n\n estimator = Estimator(normalize=True)\n\n with pytest.warns(\n FutureWarning, match=r\"Set parameter alphas to: original_alphas \\* n_samples\"\n ):\n estimator.fit(X, y)\n", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 60, + "n_words": 28, + "vocab_size": 26, + "complexity": 1, + "nloc": 8, + "token_counts": 68, + "n_ast_nodes": 108, + "n_identifiers": 13, + "d_id": 75551, + "documentation": { + "docstring": "Check that the normalize deprecation warning mentions the rescaling of alphas\n\n Non-regression test for issue #22540\n ", + "n_words": 16, + "vocab_size": 15, + "n_whitespaces": 22, + "language": "en" + } + }, + { + "id": 308815, + "commit_id": "10027b20904b678d8baecbc6e72c5bcc3f4f24b2", + "repo": "core", + "path": "homeassistant/components/nissan_leaf/__init__.py", + "file_name": "__init__.py", + "fun_name": "async_start_charging", + "commit_message": "Add button to start leaf charge (#62948)\n\nCo-authored-by: Bruce Duncan ", + "code": "async def async_start_charging(self) -> 
None:\n \n await self.hass.async_add_executor_job(self.leaf.start_charging)\n self.schedule_update()\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 29, + "n_words": 8, + "vocab_size": 8, + "complexity": 1, + "nloc": 4, + "token_counts": 26, + "n_ast_nodes": 47, + "n_identifiers": 7, + "d_id": 107548, + "documentation": { + "docstring": "Request to start charging the car. Used by the button platform.", + "n_words": 11, + "vocab_size": 10, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 306486, + "commit_id": "0c767bd0d37a41af37728b1d8b4eae8dceb7e188", + "repo": "core", + "path": "homeassistant/components/smappee/binary_sensor.py", + "file_name": "binary_sensor.py", + "fun_name": "async_update", + "commit_message": "Improve entity type hints [s] (part 1/2) (#77881)", + "code": "async def async_update(self) -> None:\n \n await self._smappee_base.async_update()\n\n self._state = self._service_location.is_present\n\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 31, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 4, + "token_counts": 25, + "n_ast_nodes": 45, + "n_identifiers": 6, + "d_id": 105270, + "documentation": { + "docstring": "Get the latest data from Smappee and update the state.", + "n_words": 10, + "vocab_size": 9, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 265126, + "commit_id": "103729c0855aad2f45fcaa2cf680799236f3e201", + "repo": "netbox", + "path": "netbox/dcim/tests/test_models.py", + "file_name": "test_models.py", + "fun_name": "test_mount_half_u_devices", + "commit_message": "Add test for 0.5U devices", + "code": "def test_mount_half_u_devices(self):\n \n rack = Rack.objects.first()\n attrs = {\n 'device_type': DeviceType.objects.get(u_height=0.5),\n 'device_role': DeviceRole.objects.first(),\n 'site': Site.objects.first(),\n 'rack': rack,\n 'face': DeviceFaceChoices.FACE_FRONT,\n }\n\n Device(name='Device 1', position=1, **attrs).save()\n Device(name='Device 2', position=1.5, **attrs).save()\n\n self.assertEqual(len(rack.get_available_units()), rack.u_height * 2 - 3)\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 137, + "n_words": 33, + "vocab_size": 30, + "complexity": 1, + "nloc": 12, + "token_counts": 121, + "n_ast_nodes": 196, + "n_identifiers": 21, + "d_id": 77999, + "documentation": { + "docstring": "\n Check that two 0.5U devices can be mounted in the same rack unit.\n ", + "n_words": 13, + "vocab_size": 13, + "n_whitespaces": 28, + "language": "en" + } + }, + { + "id": 248955, + "commit_id": "922b771337f6d14a556fa761c783748f698e924b", + "repo": "synapse", + "path": "tests/unittest.py", + "file_name": "unittest.py", + "fun_name": "make_homeserver", + "commit_message": "Add missing type hints for tests.unittest. 
(#13397)", + "code": "def make_homeserver(self, reactor, clock):\n \n hs = self.setup_test_homeserver()\n return hs\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 30, + "n_words": 9, + "vocab_size": 8, + "complexity": 1, + "nloc": 3, + "token_counts": 19, + "n_ast_nodes": 32, + "n_identifiers": 6, + "d_id": 72530, + "documentation": { + "docstring": "\n Make and return a homeserver.\n\n Args:\n reactor: A Twisted Reactor, or something that pretends to be one.\n clock (synapse.util.Clock): The Clock, associated with the reactor.\n\n Returns:\n A homeserver suitable for testing.\n\n Function to be overridden in subclasses.\n ", + "n_words": 37, + "vocab_size": 34, + "n_whitespaces": 106, + "language": "en" + } + }, + { + "id": 205746, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/db/models/query.py", + "file_name": "query.py", + "fun_name": "ordered", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def ordered(self):\n \n if isinstance(self, EmptyQuerySet):\n return True\n if self.query.extra_order_by or self.query.order_by:\n return True\n elif (\n self.query.default_ordering\n and self.query.get_meta().ordering\n and\n # A default ordering doesn't affect GROUP BY queries.\n not self.query.group_by\n ):\n return True\n else:\n return False\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 177, + "n_words": 36, + "vocab_size": 29, + "complexity": 7, + "nloc": 14, + "token_counts": 63, + "n_ast_nodes": 103, + "n_identifiers": 11, + "d_id": 51188, + "documentation": { + "docstring": "\n Return True if the QuerySet is ordered -- i.e. 
has an order_by()\n clause or a default ordering on the model (or is empty).\n ", + "n_words": 23, + "vocab_size": 21, + "n_whitespaces": 45, + "language": "en" + } + }, + { + "id": 72414, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/admin/views/generic/multiple_upload.py", + "file_name": "multiple_upload.py", + "fun_name": "get_invalid_response_data", + "commit_message": "Reformat with black", + "code": "def get_invalid_response_data(self, form):\n \n return {\n \"success\": False,\n \"error_message\": \"\\n\".join(form.errors[\"file\"]),\n }\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 53, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 5, + "token_counts": 29, + "n_ast_nodes": 54, + "n_identifiers": 5, + "d_id": 15887, + "documentation": { + "docstring": "\n Return the JSON response data for an invalid form submission\n ", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 25, + "language": "en" + } + }, + { + "id": 264915, + "commit_id": "6ff2e55ce408f0f7f2fe99129048421c25ecafe6", + "repo": "netbox", + "path": "netbox/dcim/models/cables.py", + "file_name": "cables.py", + "fun_name": "path_objects", + "commit_message": "Add origins, destinations properties on CablePath", + "code": "def path_objects(self):\n \n if not hasattr(self, '_path_objects'):\n self._path_objects = self._get_path()\n return self._path_objects\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 43, + "n_words": 11, + "vocab_size": 10, + "complexity": 2, + "nloc": 4, + "token_counts": 28, + "n_ast_nodes": 50, + "n_identifiers": 5, + "d_id": 77911, + "documentation": { + "docstring": "\n Cache and return the complete path as lists of objects, derived from their annotation within the path.\n ", + "n_words": 17, + "vocab_size": 16, + "n_whitespaces": 32, + "language": "en" + } + }, + { + "id": 107757, + "commit_id": "e1eca0aa8bf0b51009e012cd37d3e95f364d0ee9", + "repo": "matplotlib", + "path": "lib/matplotlib/backends/backend_wx.py", + "file_name": "backend_wx.py", + "fun_name": "gui_repaint", + "commit_message": "Expire deprecations in backends", + "code": "def gui_repaint(self, drawDC=None):\n \n _log.debug(\"%s - gui_repaint()\", type(self))\n # The \"if self\" check avoids a \"wrapped C/C++ object has been deleted\"\n # RuntimeError if doing things after window is closed.\n if not (self and self.IsShownOnScreen()):\n return\n if not drawDC: # not called from OnPaint use a ClientDC\n drawDC = wx.ClientDC(self)\n # For 'WX' backend on Windows, the bitmap can not be in use by another\n # DC (see GraphicsContextWx._cache).\n bmp = (self.bitmap.ConvertToImage().ConvertToBitmap()\n if wx.Platform == '__WXMSW__'\n and isinstance(self.figure._cachedRenderer, RendererWx)\n else self.bitmap)\n drawDC.DrawBitmap(bmp, 0, 0)\n if self._rubberband_rect is not None:\n # Some versions of wx+python don't support numpy.float64 here.\n x0, y0, x1, y1 = map(int, self._rubberband_rect)\n drawDC.DrawLineList(\n [(x0, y0, x1, y0), (x1, y0, x1, y1),\n (x0, y0, x0, y1), (x0, y1, x1, y1)],\n wx.Pen('BLACK', 1, wx.PENSTYLE_SHORT_DASH))\n\n filetypes = {\n **FigureCanvasBase.filetypes,\n 'bmp': 'Windows bitmap',\n 'jpeg': 'JPEG',\n 'jpg': 'JPEG',\n 'pcx': 'PCX',\n 'png': 'Portable Network Graphics',\n 'tif': 'Tagged Image Format File',\n 
'tiff': 'Tagged Image Format File',\n 'xpm': 'X pixmap',\n }\n", + "url": "https://github.com/matplotlib/matplotlib.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 448, + "n_words": 155, + "vocab_size": 121, + "complexity": 7, + "nloc": 17, + "token_counts": 175, + "n_ast_nodes": 350, + "n_identifiers": 31, + "d_id": 22898, + "documentation": { + "docstring": "\n Update the displayed image on the GUI canvas, using the supplied\n wx.PaintDC device context.\n\n The 'WXAgg' backend sets origin accordingly.\n ", + "n_words": 20, + "vocab_size": 18, + "n_whitespaces": 49, + "language": "en" + } + }, + { + "id": 266161, + "commit_id": "540bba4544d9f31c126571cc1a45a6783b3b6a89", + "repo": "netbox", + "path": "netbox/utilities/utils.py", + "file_name": "utils.py", + "fun_name": "copy_safe_request", + "commit_message": "Closes #10920: Include request cookies when queuing a custom script", + "code": "def copy_safe_request(request):\n \n meta = {\n k: request.META[k]\n for k in HTTP_REQUEST_META_SAFE_COPY\n if k in request.META and isinstance(request.META[k], str)\n }\n return NetBoxFakeRequest({\n 'META': meta,\n 'COOKIES': request.COOKIES,\n 'POST': request.POST,\n 'GET': request.GET,\n 'FILES': request.FILES,\n 'user': request.user,\n 'path': request.path,\n 'id': getattr(request, 'id', None), # UUID assigned by middleware\n })\n\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 138, + "n_words": 45, + "vocab_size": 43, + "complexity": 4, + "nloc": 16, + "token_counts": 97, + "n_ast_nodes": 158, + "n_identifiers": 16, + "d_id": 78322, + "documentation": { + "docstring": "\n Copy selected attributes from a request object into a new fake request object. 
This is needed in places where\n thread safe pickling of the useful request data is needed.\n ", + "n_words": 29, + "vocab_size": 25, + "n_whitespaces": 39, + "language": "en" + } + }, + { + "id": 26250, + "commit_id": "3f773c3890aead936949bd6923d2d7f669e1c68f", + "repo": "saleor", + "path": "saleor/graphql/product/tests/test_product_filtering_and_sorting_with_channels.py", + "file_name": "test_product_filtering_and_sorting_with_channels.py", + "fun_name": "products_for_sorting_with_channels", + "commit_message": "Add sorting by LAST_MODIFIED_AT field to GraphQL schema (#9245)\n\n* Add sorting by LAST_MODIFIED_AT to new types\r\n\r\n* Add LAST_MODIFIED_AT to sorting exported files\r\n\r\n* Update schema, fix variant sorter\r\n\r\n* Update changelog\r\n\r\n* Rebase and update changelog\r\n\r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>", + "code": "def products_for_sorting_with_channels(category, channel_USD, channel_PLN):\n product_type = ProductType.objects.create(name=\"Apple\", kind=ProductTypeKind.NORMAL)\n products = Product.objects.bulk_create(\n [\n Product(\n name=\"Product1\",\n slug=\"prod1\",\n category=category,\n product_type=product_type,\n description=dummy_editorjs(\"Test description 1.\"),\n ),\n Product(\n name=\"ProductProduct1\",\n slug=\"prod_prod1\",\n category=category,\n product_type=product_type,\n ),\n Product(\n name=\"ProductProduct2\",\n slug=\"prod_prod2\",\n category=category,\n product_type=product_type,\n ),\n Product(\n name=\"Product2\",\n slug=\"prod2\",\n category=category,\n product_type=product_type,\n description=dummy_editorjs(\"Test description 2.\"),\n ),\n Product(\n name=\"Product3\",\n slug=\"prod3\",\n category=category,\n product_type=product_type,\n description=dummy_editorjs(\"Test description 3.\"),\n ),\n ]\n )\n ProductChannelListing.objects.bulk_create(\n [\n ProductChannelListing(\n product=products[0],\n channel=channel_USD,\n is_published=True,\n discounted_price_amount=Decimal(5),\n publication_date=datetime.date(2002, 1, 1),\n ),\n ProductChannelListing(\n product=products[1],\n channel=channel_USD,\n is_published=True,\n discounted_price_amount=Decimal(15),\n publication_date=datetime.date(2000, 1, 1),\n ),\n ProductChannelListing(\n product=products[2],\n channel=channel_USD,\n is_published=False,\n discounted_price_amount=Decimal(4),\n publication_date=datetime.date(1999, 1, 1),\n ),\n ProductChannelListing(\n product=products[3],\n channel=channel_USD,\n is_published=True,\n discounted_price_amount=Decimal(7),\n publication_date=datetime.date(2001, 1, 1),\n ),\n # Second channel\n ProductChannelListing(\n product=products[0],\n channel=channel_PLN,\n is_published=False,\n discounted_price_amount=Decimal(15),\n publication_date=datetime.date(2003, 1, 1),\n ),\n ProductChannelListing(\n product=products[1],\n channel=channel_PLN,\n is_published=True,\n discounted_price_amount=Decimal(4),\n publication_date=datetime.date(1999, 1, 1),\n ),\n ProductChannelListing(\n product=products[2],\n channel=channel_PLN,\n is_published=True,\n discounted_price_amount=Decimal(5),\n publication_date=datetime.date(2000, 1, 1),\n ),\n ProductChannelListing(\n product=products[4],\n channel=channel_PLN,\n is_published=True,\n discounted_price_amount=Decimal(7),\n publication_date=datetime.date(1998, 1, 1),\n ),\n ]\n )\n variants = ProductVariant.objects.bulk_create(\n [\n ProductVariant(\n product=products[0],\n sku=str(uuid.uuid4()).replace(\"-\", \"\"),\n track_inventory=True,\n name=\"XS\",\n ),\n 
ProductVariant(\n product=products[1],\n sku=str(uuid.uuid4()).replace(\"-\", \"\"),\n track_inventory=True,\n name=\"S\",\n ),\n ProductVariant(\n product=products[2],\n sku=str(uuid.uuid4()).replace(\"-\", \"\"),\n track_inventory=True,\n name=\"M\",\n ),\n ProductVariant(\n product=products[3],\n sku=str(uuid.uuid4()).replace(\"-\", \"\"),\n track_inventory=True,\n name=\"L\",\n ),\n ProductVariant(\n product=products[4],\n sku=str(uuid.uuid4()).replace(\"-\", \"\"),\n track_inventory=True,\n name=\"XL\",\n ),\n ]\n )\n ProductVariantChannelListing.objects.bulk_create(\n [\n ProductVariantChannelListing(\n variant=variants[0],\n channel=channel_USD,\n price_amount=Decimal(10),\n currency=channel_USD.currency_code,\n ),\n ProductVariantChannelListing(\n variant=variants[1],\n channel=channel_USD,\n price_amount=Decimal(15),\n currency=channel_USD.currency_code,\n ),\n ProductVariantChannelListing(\n variant=variants[2],\n channel=channel_USD,\n price_amount=Decimal(8),\n currency=channel_USD.currency_code,\n ),\n ProductVariantChannelListing(\n variant=variants[3],\n channel=channel_USD,\n price_amount=Decimal(7),\n currency=channel_USD.currency_code,\n ),\n # Second channel\n ProductVariantChannelListing(\n variant=variants[0],\n channel=channel_PLN,\n price_amount=Decimal(15),\n currency=channel_PLN.currency_code,\n ),\n ProductVariantChannelListing(\n variant=variants[1],\n channel=channel_PLN,\n price_amount=Decimal(8),\n currency=channel_PLN.currency_code,\n ),\n ProductVariantChannelListing(\n variant=variants[2],\n channel=channel_PLN,\n price_amount=Decimal(10),\n currency=channel_PLN.currency_code,\n ),\n ProductVariantChannelListing(\n variant=variants[4],\n channel=channel_PLN,\n price_amount=Decimal(7),\n currency=channel_PLN.currency_code,\n ),\n ]\n )\n\n products[3].save()\n products[4].save()\n products[0].save()\n products[2].save()\n products[1].save()\n\n variants[2].save()\n variants[0].save()\n variants[4].save()\n variants[1].save()\n variants[3].save()\n\n return products\n\n\nQUERY_PRODUCTS_WITH_SORTING_AND_FILTERING = \n\n\n@pytest.mark.parametrize(\n \"sort_by\",\n [\n {\"field\": \"PUBLISHED\", \"direction\": \"ASC\"},\n {\"field\": \"PRICE\", \"direction\": \"DESC\"},\n {\"field\": \"MINIMAL_PRICE\", \"direction\": \"DESC\"},\n {\"field\": \"PUBLICATION_DATE\", \"direction\": \"DESC\"},\n ],\n)", + "url": "https://github.com/saleor/saleor.git", + "language": "Python", + "ast_errors": "@pytest.mark.parametrize(\n \"sort_by\",\n [\n {\"field\": \"PUBLISHED\", \"direction\": \"ASC\"},\n {\"field\": \"PRICE\", \"direction\": \"DESC\"},\n {\"field\": \"MINIMAL_PRICE\", \"direction\": \"DESC\"},\n {\"field\": \"PUBLICATION_DATE\", \"direction\": \"DESC\"},\n ],\n)", + "n_ast_errors": 1, + "ast_levels": 18, + "n_whitespaces": 2732, + "n_words": 263, + "vocab_size": 105, + "complexity": 1, + "nloc": 196, + "token_counts": 991, + "n_ast_nodes": 1552, + "n_identifiers": 45, + "d_id": 4954, + "documentation": { + "docstring": "\n query ($sortBy: ProductOrder, $filter: ProductFilterInput, $channel: String){\n products (\n first: 10, sortBy: $sortBy, filter: $filter, channel: $channel\n ) {\n edges {\n node {\n name\n slug\n }\n }\n }\n }\n", + "n_words": 29, + "vocab_size": 24, + "n_whitespaces": 157, + "language": "en" + } + }, + { + "id": 223085, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/distutils/tests/test_build_ext.py", + "file_name": "test_build_ext.py", + "fun_name": "_try_compile_deployment_target", 
+ "commit_message": "add python 3.10.4 for windows", + "code": "def _try_compile_deployment_target(self, operator, target):\n orig_environ = os.environ\n os.environ = orig_environ.copy()\n self.addCleanup(setattr, os, 'environ', orig_environ)\n\n if target is None:\n if os.environ.get('MACOSX_DEPLOYMENT_TARGET'):\n del os.environ['MACOSX_DEPLOYMENT_TARGET']\n else:\n os.environ['MACOSX_DEPLOYMENT_TARGET'] = target\n\n deptarget_c = os.path.join(self.tmp_dir, 'deptargetmodule.c')\n\n with open(deptarget_c, 'w') as fp:\n fp.write(textwrap.dedent( % operator))\n\n # get the deployment target that the interpreter was built with\n target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')\n target = tuple(map(int, target.split('.')[0:2]))\n # format the target value as defined in the Apple\n # Availability Macros. We can't use the macro names since\n # at least one value we test with will not exist yet.\n if target[:2] < (10, 10):\n # for 10.1 through 10.9.x -> \"10n0\"\n target = '%02d%01d0' % target\n else:\n # for 10.10 and beyond -> \"10nn00\"\n if len(target) >= 2:\n target = '%02d%02d00' % target\n else:\n # 11 and later can have no minor version (11 instead of 11.0)\n target = '%02d0000' % target\n deptarget_ext = Extension(\n 'deptarget',\n [deptarget_c],\n extra_compile_args=['-DTARGET=%s'%(target,)],\n )\n dist = Distribution({\n 'name': 'deptarget',\n 'ext_modules': [deptarget_ext]\n })\n dist.package_dir = self.tmp_dir\n cmd = self.build_ext(dist)\n cmd.build_lib = self.tmp_dir\n cmd.build_temp = self.tmp_dir\n\n try:\n old_stdout = sys.stdout\n if not support.verbose:\n # silence compiler output\n sys.stdout = StringIO()\n try:\n cmd.ensure_finalized()\n cmd.run()\n finally:\n sys.stdout = old_stdout\n\n except CompileError:\n self.fail(\"Wrong deployment target during compilation\")\n\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 704, + "n_words": 196, + "vocab_size": 129, + "complexity": 8, + "nloc": 55, + "token_counts": 288, + "n_ast_nodes": 500, + "n_identifiers": 47, + "d_id": 56858, + "documentation": { + "docstring": "\\\n #include \n\n int dummy;\n\n #if TARGET %s MAC_OS_X_VERSION_MIN_REQUIRED\n #else\n #error \"Unexpected target\"\n #endif\n\n ", + "n_words": 14, + "vocab_size": 14, + "n_whitespaces": 115, + "language": "en" + } + }, + { + "id": 80233, + "commit_id": "10dbbddaf35607e4257f50dd960520a1268dd225", + "repo": "wagtail", + "path": "wagtail/snippets/tests/test_locking.py", + "file_name": "test_locking.py", + "fun_name": "test_edit_post_locked_by_self", + "commit_message": "Add tests for locking snippets", + "code": "def test_edit_post_locked_by_self(self):\n \n # Lock the snippet\n self.lock_snippet(self.user)\n\n # Try to edit the snippet\n response = self.client.post(\n self.get_url(\"edit\"),\n {\"text\": \"Edited while locked\"},\n follow=True,\n )\n self.refresh_snippet()\n\n # Should not show error message\n self.assertNotContains(\n response,\n f\"The {self.model_name} could not be saved as it is locked\",\n )\n\n # Check that the snippet is still locked\n self.assertTrue(self.snippet.locked)\n\n # Check that the snippet is edited\n self.assertEqual(self.snippet.text, \"Edited while locked\")\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 216, + "n_words": 63, + "vocab_size": 45, + "complexity": 1, + "nloc": 14, + "token_counts": 77, 
+ "n_ast_nodes": 142, + "n_identifiers": 17, + "d_id": 17037, + "documentation": { + "docstring": "A user can edit a snippet that is locked by themselves.", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 131233, + "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", + "repo": "ray", + "path": "python/ray/tests/test_advanced_4.py", + "file_name": "test_advanced_4.py", + "fun_name": "test_jemalloc_env_var_propagate", + "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", + "code": "def test_jemalloc_env_var_propagate():\n \n gcs_ptype = ray.ray_constants.PROCESS_TYPE_GCS_SERVER\n \n expected = {}\n actual = ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=\"\", jemalloc_conf=\"\", jemalloc_comps=[], process_type=gcs_ptype\n )\n assert actual == expected\n actual = ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=None,\n jemalloc_conf=\"a,b,c\",\n jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER],\n process_type=gcs_ptype,\n )\n assert actual == expected\n \n library_path = \"/abc\"\n expected = {\"LD_PRELOAD\": library_path}\n actual = ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=library_path,\n jemalloc_conf=\"\",\n jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER],\n process_type=gcs_ptype,\n )\n assert actual == expected\n\n # comps should be a list type.\n with pytest.raises(AssertionError):\n ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=library_path,\n jemalloc_conf=\"\",\n jemalloc_comps=\"ray.ray_constants.PROCESS_TYPE_GCS_SERVER,\",\n process_type=gcs_ptype,\n )\n\n # When comps don't match the process_type, it should return an empty dict.\n expected = {}\n actual = ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=library_path,\n jemalloc_conf=\"\",\n jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_RAYLET],\n process_type=gcs_ptype,\n )\n \n library_path = \"/abc\"\n malloc_conf = \"a,b,c\"\n expected = {\"LD_PRELOAD\": library_path, \"MALLOC_CONF\": malloc_conf}\n actual = ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=library_path,\n jemalloc_conf=malloc_conf,\n jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER],\n process_type=gcs_ptype,\n )\n assert actual == expected\n\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 381, + "n_words": 114, + "vocab_size": 52, + "complexity": 1, + "nloc": 57, + "token_counts": 258, + "n_ast_nodes": 420, + "n_identifiers": 20, + "d_id": 29488, + "documentation": { + "docstring": "Test `propagate_jemalloc_env_var`\n If the shared library path is not specified,\n it should return an empty dict.\n \n When the shared library is specified\n \n When the malloc config is specified\n ", + "n_words": 28, + "vocab_size": 20, + "n_whitespaces": 51, + "language": "en" + } + }, + { + "id": 264682, + "commit_id": "1636508a6ac8df6b93d0ea5c621c174f605fd47a", + "repo": "netbox", + "path": "netbox/users/models.py", + "file_name": "models.py", + "fun_name": "create_userconfig", + "commit_message": "Fixes #9156: Fix loading UserConfig data from fixtures", + "code": "def create_userconfig(instance, created, raw=False, **kwargs):\n \n if created and not raw:\n config = get_config()\n UserConfig(user=instance, data=config.DEFAULT_USER_PREFERENCES).save()\n\n\n#\n# REST API\n#\n", + "url": 
"https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 37, + "n_words": 20, + "vocab_size": 18, + "complexity": 3, + "nloc": 4, + "token_counts": 42, + "n_ast_nodes": 71, + "n_identifiers": 12, + "d_id": 77785, + "documentation": { + "docstring": "\n Automatically create a new UserConfig when a new User is created. Skip this if importing a user from a fixture.\n ", + "n_words": 20, + "vocab_size": 16, + "n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 167375, + "commit_id": "7d2f9b8d59908fbf57c6453bc41891efbfe981a6", + "repo": "pandas", + "path": "pandas/io/pytables.py", + "file_name": "pytables.py", + "fun_name": "validate_attr", + "commit_message": "TYP: some return annotations in pytables.py (#47512)", + "code": "def validate_attr(self, append) -> None:\n \n if append:\n existing_fields = getattr(self.attrs, self.kind_attr, None)\n if existing_fields is not None and existing_fields != list(self.values):\n raise ValueError(\"appended items do not match existing items in table!\")\n\n existing_dtype = getattr(self.attrs, self.dtype_attr, None)\n if existing_dtype is not None and existing_dtype != self.dtype:\n raise ValueError(\n \"appended items dtype do not match existing items dtype in table!\"\n )\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 181, + "n_words": 59, + "vocab_size": 34, + "complexity": 6, + "nloc": 11, + "token_counts": 78, + "n_ast_nodes": 124, + "n_identifiers": 13, + "d_id": 39982, + "documentation": { + "docstring": "validate that we have the same order as the existing & same dtype", + "n_words": 13, + "vocab_size": 11, + "n_whitespaces": 12, + "language": "en" + } + }, + { + "id": 180326, + "commit_id": "cb2713e7050f2783493736e43a6b704865ce61c5", + "repo": "gradio", + "path": "gradio/external.py", + "file_name": "external.py", + "fun_name": "load_blocks_from_repo", + "commit_message": "Getting Interface.load() working for 2.x and 3.x models and Spaces (#1361)\n\n* version\r\n\r\n* refactor for model and 2.x spaces\r\n\r\n* fixing tests\r\n\r\n* fixed tests\r\n\r\n* getting there...\r\n\r\n* formatting\r\n\r\n* formatting\r\n\r\n* fixes\r\n\r\n* formatting\r\n\r\n* external dependencies working\r\n\r\n* formatting\r\n\r\n* loading from 3.x\r\n\r\n* changes\r\n\r\n* wow finally it's working\r\n\r\n* fixed formatting\r\n\r\n* better error for spaces\r\n\r\n* better error for spaces\r\n\r\n* fixed 3.x bug\r\n\r\n* formatting", + "code": "def load_blocks_from_repo(name, src=None, api_key=None, alias=None, **kwargs):\n \n if src is None:\n tokens = name.split(\n \"/\"\n ) # Separate the source (e.g. \"huggingface\") from the repo name (e.g. 
\"google/vit-base-patch16-224\")\n assert (\n len(tokens) > 1\n ), \"Either `src` parameter must be provided, or `name` must be formatted as {src}/{repo name}\"\n src = tokens[0]\n name = \"/\".join(tokens[1:])\n assert src.lower() in factory_methods, \"parameter: src must be one of {}\".format(\n factory_methods.keys()\n )\n blocks: gradio.Blocks = factory_methods[src](name, api_key, alias, **kwargs)\n return blocks\n\n", + "url": "https://github.com/gradio-app/gradio.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 165, + "n_words": 75, + "vocab_size": 61, + "complexity": 2, + "nloc": 15, + "token_counts": 104, + "n_ast_nodes": 167, + "n_identifiers": 17, + "d_id": 43144, + "documentation": { + "docstring": "Creates and returns a Blocks instance from several kinds of Hugging Face repos:\n 1) A model repo\n 2) A Spaces repo running Gradio 2.x\n 3) A Spaces repo running Gradio 3.x\n ", + "n_words": 31, + "vocab_size": 24, + "n_whitespaces": 43, + "language": "en" + } + }, + { + "id": 61419, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py", + "file_name": "versioncontrol.py", + "fun_name": "get_backend_for_dir", + "commit_message": "upd; format", + "code": "def get_backend_for_dir(self, location):\n # type: (str) -> Optional[VersionControl]\n \n vcs_backends = {}\n for vcs_backend in self._registry.values():\n repo_path = vcs_backend.get_repository_root(location)\n if not repo_path:\n continue\n logger.debug('Determine that %s uses VCS: %s',\n location, vcs_backend.name)\n vcs_backends[repo_path] = vcs_backend\n\n if not vcs_backends:\n return None\n\n # Choose the VCS in the inner-most directory. Since all repository\n # roots found here would be either `location` or one of its\n # parents, the longest path should have the most path components,\n # i.e. 
the backend representing the inner-most repository.\n inner_most_repo_path = max(vcs_backends, key=len)\n return vcs_backends[inner_most_repo_path]\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 257, + "n_words": 86, + "vocab_size": 67, + "complexity": 4, + "nloc": 13, + "token_counts": 75, + "n_ast_nodes": 126, + "n_identifiers": 16, + "d_id": 12562, + "documentation": { + "docstring": "\n Return a VersionControl object if a repository of that type is found\n at the given directory.\n ", + "n_words": 16, + "vocab_size": 15, + "n_whitespaces": 38, + "language": "en" + } + }, + { + "id": 208258, + "commit_id": "3a7a82af9588629dad5807e0862bacbbd5d7a7f2", + "repo": "celery", + "path": "celery/canvas.py", + "file_name": "canvas.py", + "fun_name": "_stamp_regen_task", + "commit_message": "Canvas.py doc enhancement (#7889)\n\n* Enhanced doc for canvas.maybe_unroll_group()\r\n\r\n* Enhanced doc for canvas._stamp_regen_task()\r\n\r\n* Enhanced doc for canvas._merge_dictionaries()", + "code": "def _stamp_regen_task(task, visitor, **headers):\n \n\n task.stamp(visitor=visitor, **headers)\n return task\n\n", + "url": "https://github.com/celery/celery.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 17, + "n_words": 8, + "vocab_size": 8, + "complexity": 1, + "nloc": 3, + "token_counts": 24, + "n_ast_nodes": 39, + "n_identifiers": 5, + "d_id": 52267, + "documentation": { + "docstring": "When stamping a sequence of tasks created by a generator,\n we use this function to stamp each task in the generator\n without exhausting it.", + "n_words": 24, + "vocab_size": 23, + "n_whitespaces": 29, + "language": "en" + } + }, + { + "id": 191835, + "commit_id": "11e1a8a3fa8d13513fe926b731fb907a066af2a1", + "repo": "ydata-profiling", + "path": "src/pandas_profiling/visualisation/context.py", + "file_name": "context.py", + "fun_name": "manage_matplotlib_context", + "commit_message": "fix: change context managed backend (#1149)", + "code": "def manage_matplotlib_context() -> Any:\n \n originalRcParams = matplotlib.rcParams.copy()\n\n # Credits for this style go to the ggplot and seaborn packages.\n # We copied the style file to remove dependencies on the Seaborn package.\n # Check it out, it's an awesome library for plotting\n customRcParams = {\n \"patch.facecolor\": \"#348ABD\", # blue\n \"patch.antialiased\": True,\n \"font.size\": 10.0,\n \"figure.edgecolor\": \"0.50\",\n # Seaborn common parameters\n \"figure.facecolor\": \"white\",\n \"text.color\": \".15\",\n \"axes.labelcolor\": \".15\",\n \"legend.numpoints\": 1,\n \"legend.scatterpoints\": 1,\n \"xtick.direction\": \"out\",\n \"ytick.direction\": \"out\",\n \"xtick.color\": \".15\",\n \"ytick.color\": \".15\",\n \"axes.axisbelow\": True,\n \"image.cmap\": \"Greys\",\n \"font.family\": [\"sans-serif\"],\n \"font.sans-serif\": [\n \"Arial\",\n \"Liberation Sans\",\n \"Bitstream Vera Sans\",\n \"sans-serif\",\n ],\n \"grid.linestyle\": \"-\",\n \"lines.solid_capstyle\": \"round\",\n # Seaborn darkgrid parameters\n # .15 = dark_gray\n # .8 = light_gray\n \"axes.grid\": True,\n \"axes.facecolor\": \"#EAEAF2\",\n \"axes.edgecolor\": \"white\",\n \"axes.linewidth\": 0,\n \"grid.color\": \"white\",\n # Seaborn notebook context\n \"figure.figsize\": [8.0, 5.5],\n \"axes.labelsize\": 11,\n \"axes.titlesize\": 12,\n \"xtick.labelsize\": 10,\n \"ytick.labelsize\": 10,\n 
\"legend.fontsize\": 10,\n \"grid.linewidth\": 1,\n \"lines.linewidth\": 1.75,\n \"patch.linewidth\": 0.3,\n \"lines.markersize\": 7,\n \"lines.markeredgewidth\": 0,\n \"xtick.major.width\": 1,\n \"ytick.major.width\": 1,\n \"xtick.minor.width\": 0.5,\n \"ytick.minor.width\": 0.5,\n \"xtick.major.pad\": 7,\n \"ytick.major.pad\": 7,\n \"backend\": \"agg\",\n }\n\n try:\n register_matplotlib_converters()\n matplotlib.rcParams.update(customRcParams)\n sns.set_style(style=\"white\")\n yield\n finally:\n deregister_matplotlib_converters() # revert to original unit registries\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=matplotlib.cbook.mplDeprecation)\n matplotlib.rcParams.update(originalRcParams) # revert to original rcParams\n", + "url": "https://github.com/ydataai/ydata-profiling.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 662, + "n_words": 184, + "vocab_size": 139, + "complexity": 2, + "nloc": 62, + "token_counts": 273, + "n_ast_nodes": 503, + "n_identifiers": 19, + "d_id": 46847, + "documentation": { + "docstring": "Return a context manager for temporarily changing matplotlib unit registries and rcParams.", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 11, + "language": "en" + } + }, + { + "id": 102175, + "commit_id": "bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d", + "repo": "pytorch", + "path": "tools/test/test_gen_backend_stubs.py", + "file_name": "test_gen_backend_stubs.py", + "fun_name": "test_empty_backend", + "commit_message": "Revert \"Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels\" (#69950)\n\nSummary:\nPull Request resolved: https://github.com/pytorch/pytorch/pull/69950\n\nThis reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa.\n\nTest Plan: Imported from OSS\n\nReviewed By: albanD\n\nDifferential Revision: D33113545\n\nPulled By: bdhirsh\n\nfbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288", + "code": "def test_empty_backend(self) -> None:\n yaml_str = \n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, )\n", + "url": "https://github.com/pytorch/pytorch.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 32, + "n_words": 11, + "vocab_size": 10, + "complexity": 1, + "nloc": 8, + "token_counts": 26, + "n_ast_nodes": 47, + "n_identifiers": 6, + "d_id": 21490, + "documentation": { + "docstring": "\\\nbackend:\ncpp_namespace: torch_xla\nsupported:\n- absYou must provide a value for \"backend\"", + "n_words": 13, + "vocab_size": 13, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 136419, + "commit_id": "e368dd9b4e10026767df66d1811a92bd8ca2d8f9", + "repo": "ray", + "path": "rllib/algorithms/algorithm.py", + "file_name": "algorithm.py", + "fun_name": "_run_offline_evaluation", + "commit_message": "[RLlib] By-pass Evaluation workers when doing OPE (#30135)\n\nSigned-off-by: Kourosh Hakhamaneshi ", + "code": "def _run_offline_evaluation(self):\n \n assert len(self.workers.local_worker().policy_map) == 1\n\n parallelism = self.evaluation_config.evaluation_num_workers or 1\n offline_eval_results = {\"off_policy_estimator\": {}}\n for evaluator_name, offline_evaluator in self.reward_estimators.items():\n offline_eval_results[\"off_policy_estimator\"][\n evaluator_name\n ] = offline_evaluator.estimate_on_dataset(\n self.evaluation_dataset,\n n_parallelism=parallelism,\n )\n return 
offline_eval_results\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 150, + "n_words": 30, + "vocab_size": 26, + "complexity": 3, + "nloc": 12, + "token_counts": 74, + "n_ast_nodes": 121, + "n_identifiers": 17, + "d_id": 30913, + "documentation": { + "docstring": "Runs offline evaluation via `OfflineEvaluator.estimate_on_dataset()` API.\n\n This method will be used when `evaluation_dataset` is provided.\n Note: This will only work if the policy is a single agent policy.\n\n Returns:\n The results dict from the offline evaluation call.\n ", + "n_words": 37, + "vocab_size": 31, + "n_whitespaces": 76, + "language": "en" + } + }, + { + "id": 112126, + "commit_id": "14d2966b9e91ae16dcc39de8f41017a75cec8ff9", + "repo": "nni", + "path": "nni/retiarii/oneshot/pytorch/base_lightning.py", + "file_name": "base_lightning.py", + "fun_name": "configure_optimizers", + "commit_message": "Valuechoice oneshot lightning (#4602)", + "code": "def configure_optimizers(self):\n \n # pylint: disable=assignment-from-none\n arc_optimizers = self.configure_architecture_optimizers()\n if arc_optimizers is None:\n return self.model.configure_optimizers()\n\n if isinstance(arc_optimizers, optim.Optimizer):\n arc_optimizers = [arc_optimizers]\n self.arc_optim_count = len(arc_optimizers)\n\n # The return values ``frequency`` and ``monitor`` are ignored because lightning requires\n # ``len(optimizers) == len(frequency)``, and gradient backword is handled manually.\n # For data structure of variables below, please see pytorch lightning docs of ``configure_optimizers``.\n w_optimizers, lr_schedulers, self.frequencies, monitor = \\\n self.trainer._configure_optimizers(self.model.configure_optimizers())\n lr_schedulers = self.trainer._configure_schedulers(lr_schedulers, monitor, not self.automatic_optimization)\n if any(sch[\"scheduler\"].optimizer not in w_optimizers for sch in lr_schedulers):\n raise Exception(\n \"Some schedulers are attached with an optimizer that wasn't returned from `configure_optimizers`.\"\n )\n\n # variables used to handle optimizer frequency\n self.cur_optimizer_step = 0\n self.cur_optimizer_index = 0\n\n return arc_optimizers + w_optimizers, lr_schedulers\n", + "url": "https://github.com/microsoft/nni.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 296, + "n_words": 114, + "vocab_size": 85, + "complexity": 5, + "nloc": 17, + "token_counts": 130, + "n_ast_nodes": 211, + "n_identifiers": 24, + "d_id": 24584, + "documentation": { + "docstring": "\n Combine architecture optimizers and user's model optimizers.\n You can overwrite configure_architecture_optimizers if architecture optimizers are needed in your NAS algorithm.\n For now ``self.model`` is tested against :class:`nni.retiarii.evaluator.pytorch.lightning._SupervisedLearningModule`\n and it only returns 1 optimizer.\n But for extendibility, codes for other return value types are also implemented.\n ", + "n_words": 45, + "vocab_size": 40, + "n_whitespaces": 88, + "language": "en" + } + }, + { + "id": 192419, + "commit_id": "289fce29b3e2392114aadbe7a419df0f2e3ac1be", + "repo": "vision", + "path": "torchvision/transforms/_functional_video.py", + "file_name": "_functional_video.py", + "fun_name": "crop", + "commit_message": "Replace asserts with exceptions (#5587)\n\n* replace most asserts with exceptions\r\n\r\n* fix formating issues\r\n\r\n* fix linting and remove more 
asserts\r\n\r\n* fix regresion\r\n\r\n* fix regresion\r\n\r\n* fix bug\r\n\r\n* apply ufmt\r\n\r\n* apply ufmt\r\n\r\n* fix tests\r\n\r\n* fix format\r\n\r\n* fix None check\r\n\r\n* fix detection models tests\r\n\r\n* non scriptable any\r\n\r\n* add more checks for None values\r\n\r\n* fix retinanet test\r\n\r\n* fix retinanet test\r\n\r\n* Update references/classification/transforms.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update references/classification/transforms.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update references/optical_flow/transforms.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update references/optical_flow/transforms.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update references/optical_flow/transforms.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* make value checks more pythonic:\r\n\r\n* Update references/optical_flow/transforms.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* make value checks more pythonic\r\n\r\n* make more checks pythonic\r\n\r\n* fix bug\r\n\r\n* appy ufmt\r\n\r\n* fix tracing issues\r\n\r\n* fib typos\r\n\r\n* fix lint\r\n\r\n* remove unecessary f-strings\r\n\r\n* fix bug\r\n\r\n* Update torchvision/datasets/mnist.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/datasets/mnist.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/ops/boxes.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/ops/poolers.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/utils.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* address PR comments\r\n\r\n* Update torchvision/io/_video_opt.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/models/detection/generalized_rcnn.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/models/feature_extraction.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/models/optical_flow/raft.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* address PR comments\r\n\r\n* addressing further pr comments\r\n\r\n* fix bug\r\n\r\n* remove unecessary else\r\n\r\n* apply ufmt\r\n\r\n* last pr comment\r\n\r\n* replace RuntimeErrors\r\n\r\nCo-authored-by: Nicolas Hug ", + "code": "def crop(clip, i, j, h, w):\n \n if len(clip.size()) != 4:\n raise ValueError(\"clip should be a 4D tensor\")\n return clip[..., i : i + h, j : j + w]\n\n", + "url": "https://github.com/pytorch/vision.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 45, + "n_words": 29, + "vocab_size": 24, + "complexity": 2, + "nloc": 4, + "token_counts": 48, + "n_ast_nodes": 74, + "n_identifiers": 9, + "d_id": 46893, + "documentation": { + "docstring": "\n Args:\n clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)\n ", + "n_words": 14, + "vocab_size": 13, + "n_whitespaces": 28, + "language": "en" + } + }, + { + "id": 269598, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/backend.py", + "file_name": "backend.py", + "fun_name": "binary_crossentropy", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def binary_crossentropy(target, output, from_logits=False):\n \n target = tf.convert_to_tensor(target)\n output = tf.convert_to_tensor(output)\n\n # Use logits whenever they are available. 
`softmax` and `sigmoid`\n # activations cache logits on the `output` Tensor.\n if hasattr(output, \"_keras_logits\"):\n output = output._keras_logits # pylint: disable=protected-access\n if from_logits:\n warnings.warn(\n '\"`binary_crossentropy` received `from_logits=True`, but the `output`'\n \" argument was produced by a sigmoid or softmax activation and thus \"\n 'does not represent logits. Was this intended?\"',\n stacklevel=2,\n )\n from_logits = True\n\n if from_logits:\n return tf.nn.sigmoid_cross_entropy_with_logits(\n labels=target, logits=output\n )\n\n if (\n not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable))\n and output.op.type == \"Sigmoid\"\n ) and not hasattr(output, \"_keras_history\"):\n # When sigmoid activation function is used for output operation, we\n # use logits from the sigmoid function directly to compute loss in order\n # to prevent collapsing zero when training.\n assert len(output.op.inputs) == 1\n output = output.op.inputs[0]\n return tf.nn.sigmoid_cross_entropy_with_logits(\n labels=target, logits=output\n )\n\n epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)\n output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_)\n\n # Compute cross entropy from probabilities.\n bce = target * tf.math.log(output + epsilon())\n bce += (1 - target) * tf.math.log(1 - output + epsilon())\n return -bce\n\n\n@keras_export(\"keras.backend.binary_focal_crossentropy\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "@keras_export(\"keras.backend.binary_focal_crossentropy\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", + "n_ast_errors": 1, + "ast_levels": 14, + "n_whitespaces": 421, + "n_words": 176, + "vocab_size": 121, + "complexity": 7, + "nloc": 31, + "token_counts": 222, + "n_ast_nodes": 387, + "n_identifiers": 37, + "d_id": 80219, + "documentation": { + "docstring": "Binary crossentropy between an output tensor and a target tensor.\n\n Args:\n target: A tensor with the same shape as `output`.\n output: A tensor.\n from_logits: Whether `output` is expected to be a logits tensor.\n By default, we consider that `output`\n encodes a probability distribution.\n\n Returns:\n A tensor.\n ", + "n_words": 46, + "vocab_size": 37, + "n_whitespaces": 105, + "language": "en" + } + }, + { + "id": 281704, + "commit_id": "6a66f3f3ed934e0615ff4ba283ee67fcc43d3656", + "repo": "OpenBBTerminal", + "path": "gamestonk_terminal/custom/quantitative_analysis/qa_controller.py", + "file_name": "qa_controller.py", + "fun_name": "print_help", + "commit_message": "Custom data context (#1193)\n\n* Add first iteration of custom context\r\n\r\n* Add sample data + improve plot\r\n\r\n* Change `head` to `show` with sorting and limit. Add \"-x\" to plot and dynamic update of completer\r\n\r\n* generate random time series for test csv\r\n\r\n* Make columns lower case. Check if date is in columns and convert to timestamp. 
Improve plotting for dates\r\n\r\n* Add qa to custom\r\n\r\n* Add pred to custom\r\n\r\n* Hugooooo\r\n\r\n* Testing\r\n\r\n* dang whitespace\r\n\r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: didierlopes.eth ", + "code": "def print_help(self):\n \n help_text = f\n console.print(text=help_text, menu=\"Custom - Quantitative Analysis\")\n", + "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 31, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 31, + "token_counts": 22, + "n_ast_nodes": 54, + "n_identifiers": 9, + "d_id": 83981, + "documentation": { + "docstring": "Print help[cmds]\n load load new data file\n pick pick target column for analysis[/cmds]\n\n[param]File: [/param]{self.file}\n[param]Target Column: [/param]{self.target}\n[cmds]\n[info]Statistics:[/info]\n summary brief summary statistics of loaded stock.\n normality normality statistics and tests\n unitroot unit root test for stationarity (ADF, KPSS)\n[info]Plots:[/info]\n line line plot of selected target\n hist histogram with density plot\n cdf cumulative distribution function\n bw box and whisker plot\n acf (partial) auto-correlation function differentials of prices\n qqplot residuals against standard normal curve\n[info]Rolling Metrics:[/info]\n rolling rolling mean and std deviation of prices\n spread rolling variance and std deviation of prices\n quantile rolling median and quantile of prices\n skew rolling skewness of distribution of prices\n kurtosis rolling kurtosis of distribution of prices\n[info]Other:[/info]\n raw print raw data\n decompose decomposition in cyclic-trend, season, and residuals of prices\n cusum detects abrupt changes using cumulative sum algorithm of prices[/cmds]\n ", + "n_words": 137, + "vocab_size": 89, + "n_whitespaces": 297, + "language": "en" + } + }, + { + "id": 217668, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/html/__init__.py", + "file_name": "__init__.py", + "fun_name": "unescape", + "commit_message": "add python 3.10.4 for windows", + "code": "def unescape(s):\n \n if '&' not in s:\n return s\n return _charref.sub(_replace_charref, s)\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 7, + "n_whitespaces": 28, + "n_words": 12, + "vocab_size": 11, + "complexity": 2, + "nloc": 4, + "token_counts": 23, + "n_ast_nodes": 40, + "n_identifiers": 5, + "d_id": 54879, + "documentation": { + "docstring": "\n Convert all named and numeric character references (e.g. 
>, >,\n &x3e;) in the string s to the corresponding unicode characters.\n This function uses the rules defined by the HTML 5 standard\n for both valid and invalid character references, and the list of\n HTML 5 named character references defined in html.entities.html5.\n ", + "n_words": 50, + "vocab_size": 36, + "n_whitespaces": 69, + "language": "en" + } + }, + { + "id": 9407, + "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", + "repo": "insightface", + "path": "reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/upfirdn_2d.py", + "file_name": "upfirdn_2d.py", + "fun_name": "upsample_conv_2d", + "commit_message": "initialize ostec", + "code": "def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):\n r\n\n assert isinstance(factor, int) and factor >= 1\n\n # Check weight shape.\n w = tf.convert_to_tensor(w)\n assert w.shape.rank == 4\n convH = w.shape[0].value\n convW = w.shape[1].value\n inC = _shape(w, 2)\n outC = _shape(w, 3)\n assert convW == convH\n\n # Setup filter kernel.\n if k is None:\n k = [1] * factor\n k = _setup_kernel(k) * (gain * (factor ** 2))\n p = (k.shape[0] - factor) - (convW - 1)\n\n # Determine data dimensions.\n if data_format == 'NCHW':\n stride = [1, 1, factor, factor]\n output_shape = [_shape(x, 0), outC, (_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW]\n num_groups = _shape(x, 1) // inC\n else:\n stride = [1, factor, factor, 1]\n output_shape = [_shape(x, 0), (_shape(x, 1) - 1) * factor + convH, (_shape(x, 2) - 1) * factor + convW, outC]\n num_groups = _shape(x, 3) // inC\n\n # Transpose weights.\n w = tf.reshape(w, [convH, convW, inC, num_groups, -1])\n w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2])\n w = tf.reshape(w, [convH, convW, -1, num_groups * inC])\n\n # Execute.\n x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format)\n return _simple_upfirdn_2d(x, k, pad0=(p+1)//2+factor-1, pad1=p//2+1, data_format=data_format, impl=impl)\n\n#----------------------------------------------------------------------------\n", + "url": "https://github.com/deepinsight/insightface.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 317, + "n_words": 198, + "vocab_size": 110, + "complexity": 4, + "nloc": 48, + "token_counts": 387, + "n_ast_nodes": 602, + "n_identifiers": 34, + "d_id": 1607, + "documentation": { + "docstring": "Fused `upsample_2d()` followed by `tf.nn.conv2d()`.\n\n Padding is performed only once at the beginning, not between the operations.\n The fused op is considerably more efficient than performing the same calculation\n using standard TensorFlow ops. It supports gradients of arbitrary order.\n\n Args:\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.\n w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`.\n Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).\n The default is `[1] * factor`, which corresponds to nearest-neighbor\n upsampling.\n factor: Integer upsampling factor (default: 2).\n gain: Scaling factor for signal magnitude (default: 1.0).\n data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).\n impl: Name of the implementation to use. 
Can be `\"ref\"` or `\"cuda\"` (default).\n\n Returns:\n Tensor of the shape `[N, C, H * factor, W * factor]` or\n `[N, H * factor, W * factor, C]`, and same datatype as `x`.\n ", + "n_words": 158, + "vocab_size": 114, + "n_whitespaces": 358, + "language": "en" + } + }, + { + "id": 65150, + "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", + "repo": "erpnext", + "path": "erpnext/accounts/report/asset_depreciations_and_balances/asset_depreciations_and_balances.py", + "file_name": "asset_depreciations_and_balances.py", + "fun_name": "get_assets", + "commit_message": "style: format code with black", + "code": "def get_assets(filters):\n\treturn frappe.db.sql(\n\t\t,\n\t\t{\"to_date\": filters.to_date, \"from_date\": filters.from_date, \"company\": filters.company},\n\t\tas_dict=1,\n\t)\n\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 7, + "n_words": 13, + "vocab_size": 13, + "complexity": 1, + "nloc": 49, + "token_counts": 39, + "n_ast_nodes": 64, + "n_identifiers": 9, + "d_id": 13808, + "documentation": { + "docstring": "\n\t\tSELECT results.asset_category,\n\t\t\t sum(results.accumulated_depreciation_as_on_from_date) as accumulated_depreciation_as_on_from_date,\n\t\t\t sum(results.depreciation_eliminated_during_the_period) as depreciation_eliminated_during_the_period,\n\t\t\t sum(results.depreciation_amount_during_the_period) as depreciation_amount_during_the_period\n\t\tfrom (SELECT a.asset_category,\n\t\t\t\t ifnull(sum(case when ds.schedule_date < %(from_date)s and (ifnull(a.disposal_date, 0) = 0 or a.disposal_date >= %(from_date)s) then\n\t\t\t\t\t\t\t\t ds.depreciation_amount\n\t\t\t\t\t\t\t else\n\t\t\t\t\t\t\t\t 0\n\t\t\t\t\t\t\t end), 0) as accumulated_depreciation_as_on_from_date,\n\t\t\t\t ifnull(sum(case when ifnull(a.disposal_date, 0) != 0 and a.disposal_date >= %(from_date)s\n\t\t\t\t\t\t\t\t\t\tand a.disposal_date <= %(to_date)s and ds.schedule_date <= a.disposal_date then\n\t\t\t\t\t\t\t\t ds.depreciation_amount\n\t\t\t\t\t\t\t else\n\t\t\t\t\t\t\t\t 0\n\t\t\t\t\t\t\t end), 0) as depreciation_eliminated_during_the_period,\n\t\t\t\t ifnull(sum(case when ds.schedule_date >= %(from_date)s and ds.schedule_date <= %(to_date)s\n\t\t\t\t\t\t\t\t\t\tand (ifnull(a.disposal_date, 0) = 0 or ds.schedule_date <= a.disposal_date) then\n\t\t\t\t\t\t\t\t ds.depreciation_amount\n\t\t\t\t\t\t\t else\n\t\t\t\t\t\t\t\t 0\n\t\t\t\t\t\t\t end), 0) as depreciation_amount_during_the_period\n\t\t\tfrom `tabAsset` a, `tabDepreciation Schedule` ds\n\t\t\twhere a.docstatus=1 and a.company=%(company)s and a.purchase_date <= %(to_date)s and a.name = ds.parent and ifnull(ds.journal_entry, '') != ''\n\t\t\tgroup by a.asset_category\n\t\t\tunion\n\t\t\tSELECT a.asset_category,\n\t\t\t\t ifnull(sum(case when ifnull(a.disposal_date, 0) != 0 and (a.disposal_date < %(from_date)s or a.disposal_date > %(to_date)s) then\n\t\t\t\t\t\t\t\t\t0\n\t\t\t\t\t\t\t else\n\t\t\t\t\t\t\t\t\ta.opening_accumulated_depreciation\n\t\t\t\t\t\t\t end), 0) as accumulated_depreciation_as_on_from_date,\n\t\t\t\t ifnull(sum(case when a.disposal_date >= %(from_date)s and a.disposal_date <= %(to_date)s then\n\t\t\t\t\t\t\t\t a.opening_accumulated_depreciation\n\t\t\t\t\t\t\t else\n\t\t\t\t\t\t\t\t 0\n\t\t\t\t\t\t\t end), 0) as depreciation_eliminated_during_the_period,\n\t\t\t\t 0 as depreciation_amount_during_the_period\n\t\t\tfrom `tabAsset` a\n\t\t\twhere a.docstatus=1 and a.company=%(company)s and a.purchase_date <= 
%(to_date)s\n\t\t\tgroup by a.asset_category) as results\n\t\tgroup by results.asset_category\n\t\t", + "n_words": 178, + "vocab_size": 61, + "n_whitespaces": 209, + "language": "en" + } + }, + { + "id": 198878, + "commit_id": "68bd82de645a61f4bbc0b6246e70959373c9cba2", + "repo": "sympy", + "path": "sympy/printing/aesaracode.py", + "file_name": "aesaracode.py", + "fun_name": "_get_or_create", + "commit_message": "fix(printing): change Aesara argument broadcastable to shape", + "code": "def _get_or_create(self, s, name=None, dtype=None, broadcastable=None):\n \n\n # Defaults\n if name is None:\n name = s.name\n if dtype is None:\n dtype = 'floatX'\n if broadcastable is None:\n broadcastable = ()\n\n key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable)\n\n if key in self.cache:\n return self.cache[key]\n\n value = aet.tensor(name=name, dtype=dtype, shape=broadcastable)\n self.cache[key] = value\n return value\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 165, + "n_words": 51, + "vocab_size": 30, + "complexity": 5, + "nloc": 13, + "token_counts": 107, + "n_ast_nodes": 164, + "n_identifiers": 13, + "d_id": 49056, + "documentation": { + "docstring": "\n Get the Aesara variable for a SymPy symbol from the cache, or create it\n if it does not exist.\n ", + "n_words": 19, + "vocab_size": 17, + "n_whitespaces": 41, + "language": "en" + } + }, + { + "id": 265204, + "commit_id": "fcd1daaf798d62023f999c3e09e035f7b3f47c8f", + "repo": "netbox", + "path": "netbox/dcim/models/device_components.py", + "file_name": "device_components.py", + "fun_name": "get_downstream_powerports", + "commit_message": "Update power utilization calculations for new cabling model", + "code": "def get_downstream_powerports(self, leg=None):\n \n poweroutlets = self.poweroutlets.filter(cable__isnull=False)\n if leg:\n poweroutlets = poweroutlets.filter(feed_leg=leg)\n if not poweroutlets:\n return PowerPort.objects.none()\n\n q = Q()\n for poweroutlet in poweroutlets:\n q |= Q(\n cable=poweroutlet.cable,\n cable_end=poweroutlet.opposite_cable_end\n )\n\n return PowerPort.objects.filter(q)\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 154, + "n_words": 31, + "vocab_size": 24, + "complexity": 4, + "nloc": 13, + "token_counts": 82, + "n_ast_nodes": 132, + "n_identifiers": 16, + "d_id": 78026, + "documentation": { + "docstring": "\n Return a queryset of all PowerPorts connected via cable to a child PowerOutlet.\n ", + "n_words": 13, + "vocab_size": 12, + "n_whitespaces": 28, + "language": "en" + } + }, + { + "id": 284320, + "commit_id": "e59a30b18873f7449bc59a88c3da21894e0dbe0a", + "repo": "OpenBBTerminal", + "path": "openbb_terminal/cryptocurrency/due_diligence/dd_controller.py", + "file_name": "dd_controller.py", + "fun_name": "call_news", + "commit_message": "Add crypto DD commands (#1710)\n\n* Add github activity over time\r\n\r\n* Create tests\r\n\r\n* Update default days in chart command\r\n\r\n* Add san package to poetry\r\n\r\n* Fix tests failed\r\n\r\n* Generate fixtures\r\n\r\n* Fix tests failed\r\n\r\n* Remove sanpy package and use requests instead\r\n\r\n* Adjust index\r\n\r\n* Add hugo server\r\n\r\n* Fix datetime\r\n\r\n* Update tests\r\n\r\n* Revert \"Update tests\"\r\n\r\nThis reverts commit ffe03a7224cd830a14d2f425d7d59f00a10f27ac.\r\n\r\n* Fix tests\r\n\r\n* Regenerate 
cassettes & filter api tokens\r\n\r\n* Fix windows issues\r\n\r\n* Fix PR comments\r\n\r\n* Pass tests & fix comments", + "code": "def call_news(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"news\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n\n parser.add_argument(\n \"-l\",\n \"--limit\",\n dest=\"limit\",\n type=check_positive,\n help=\"display N number records\",\n default=10,\n )\n\n parser.add_argument(\n \"-k\",\n \"--kind\",\n dest=\"kind\",\n type=str,\n help=\"Filter by category of news. Available values: news or media.\",\n default=\"news\",\n choices=cryptopanic_model.CATEGORIES,\n )\n\n parser.add_argument(\n \"-f\",\n \"--filter\",\n dest=\"filter\",\n type=str,\n help=\"Filter by kind of news. One from list: rising|hot|bullish|bearish|important|saved|lol\",\n default=None,\n required=False,\n choices=cryptopanic_model.FILTERS,\n )\n\n parser.add_argument(\n \"-r\",\n \"--region\",\n dest=\"region\",\n type=str,\n help=\"Filter news by regions. Available regions are: en (English), de (Deutsch), nl (Dutch), es (Español), \"\n \"fr (Français), it (Italiano), pt (Português), ru (Русский)\",\n default=\"en\",\n choices=cryptopanic_model.REGIONS,\n )\n\n parser.add_argument(\n \"-s\",\n \"--sort\",\n dest=\"sortby\",\n type=str,\n help=\"Sort by given column. Default: published_at\",\n default=\"published_at\",\n choices=cryptopanic_model.SORT_FILTERS,\n )\n\n parser.add_argument(\n \"--descend\",\n action=\"store_false\",\n help=\"Flag to sort in descending order (lowest first)\",\n dest=\"descend\",\n default=True,\n )\n\n parser.add_argument(\n \"-u\",\n \"--urls\",\n dest=\"urls\",\n action=\"store_false\",\n help=\"Flag to disable urls. If you will use the flag you will hide the column with urls\",\n default=True,\n )\n\n ns_parser = parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n\n if ns_parser:\n cryptopanic_view.display_news(\n top=ns_parser.limit,\n source=self.source,\n currency=self.coin,\n export=ns_parser.export,\n descend=ns_parser.descend,\n post_kind=ns_parser.kind,\n filter_=ns_parser.filter,\n region=ns_parser.region,\n )\n", + "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 1015, + "n_words": 161, + "vocab_size": 126, + "complexity": 2, + "nloc": 83, + "token_counts": 301, + "n_ast_nodes": 480, + "n_identifiers": 43, + "d_id": 84695, + "documentation": { + "docstring": "Process news commandDisplay most recent news on the given coin from CryptoPanic aggregator platform.\n [Source: https://cryptopanic.com/]", + "n_words": 16, + "vocab_size": 15, + "n_whitespaces": 26, + "language": "en" + } + }, + { + "id": 107046, + "commit_id": "c682ca40c647770a967b6b8a7615eb91c7cb3fc9", + "repo": "matplotlib", + "path": "lib/matplotlib/_constrained_layout.py", + "file_name": "_constrained_layout.py", + "fun_name": "make_layoutgrids_gs", + "commit_message": "FIX: better repr for subgridspecs", + "code": "def make_layoutgrids_gs(layoutgrids, gs):\n \n\n if gs in layoutgrids or gs.figure is None:\n return layoutgrids\n # in order to do constrained_layout there has to be at least *one*\n # gridspec in the tree:\n layoutgrids['hasgrids'] = True\n if not hasattr(gs, '_subplot_spec'):\n # normal gridspec\n parent = layoutgrids[gs.figure]\n layoutgrids[gs] = mlayoutgrid.LayoutGrid(\n parent=parent,\n parent_inner=True,\n name='gridspec',\n ncols=gs._ncols, 
nrows=gs._nrows,\n width_ratios=gs.get_width_ratios(),\n height_ratios=gs.get_height_ratios())\n else:\n # this is a gridspecfromsubplotspec:\n subplot_spec = gs._subplot_spec\n parentgs = subplot_spec.get_gridspec()\n # if a nested gridspec it is possible the parent is not in there yet:\n if parentgs not in layoutgrids:\n layoutgrids = make_layoutgrids_gs(layoutgrids, parentgs)\n subspeclb = layoutgrids[parentgs]\n # get a unique representation:\n rep = object.__repr__(gs) + 'top'\n # gridspecfromsubplotspec need an outer container:\n if rep not in layoutgrids:\n layoutgrids[rep] = mlayoutgrid.LayoutGrid(\n parent=subspeclb,\n name='top',\n nrows=1, ncols=1,\n parent_pos=(subplot_spec.rowspan, subplot_spec.colspan))\n layoutgrids[gs] = mlayoutgrid.LayoutGrid(\n parent=layoutgrids[rep],\n name='gridspec',\n nrows=gs._nrows, ncols=gs._ncols,\n width_ratios=gs.get_width_ratios(),\n height_ratios=gs.get_height_ratios())\n return layoutgrids\n\n", + "url": "https://github.com/matplotlib/matplotlib.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 510, + "n_words": 134, + "vocab_size": 80, + "complexity": 6, + "nloc": 33, + "token_counts": 230, + "n_ast_nodes": 361, + "n_identifiers": 29, + "d_id": 22565, + "documentation": { + "docstring": "\n Make the layoutgrid for a gridspec (and anything nested in the gridspec)\n ", + "n_words": 12, + "vocab_size": 11, + "n_whitespaces": 19, + "language": "en" + } + }, + { + "id": 220737, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/asyncio/sslproto.py", + "file_name": "sslproto.py", + "fun_name": "data_received", + "commit_message": "add python 3.10.4 for windows", + "code": "def data_received(self, data):\n \n if self._sslpipe is None:\n # transport closing, sslpipe is destroyed\n return\n\n try:\n ssldata, appdata = self._sslpipe.feed_ssldata(data)\n except (SystemExit, KeyboardInterrupt):\n raise\n except BaseException as e:\n self._fatal_error(e, 'SSL error in data received')\n return\n\n for chunk in ssldata:\n self._transport.write(chunk)\n\n for chunk in appdata:\n if chunk:\n try:\n if self._app_protocol_is_buffer:\n protocols._feed_data_to_buffered_proto(\n self._app_protocol, chunk)\n else:\n self._app_protocol.data_received(chunk)\n except (SystemExit, KeyboardInterrupt):\n raise\n except BaseException as ex:\n self._fatal_error(\n ex, 'application protocol failed to receive SSL data')\n return\n else:\n self._start_shutdown()\n break\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 488, + "n_words": 74, + "vocab_size": 55, + "complexity": 10, + "nloc": 29, + "token_counts": 130, + "n_ast_nodes": 217, + "n_identifiers": 21, + "d_id": 56109, + "documentation": { + "docstring": "Called when some SSL data is received.\n\n The argument is a bytes object.\n ", + "n_words": 13, + "vocab_size": 12, + "n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 226141, + "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", + "repo": "plotly.py", + "path": "packages/python/plotly/plotly/figure_factory/_candlestick.py", + "file_name": "_candlestick.py", + "fun_name": "make_increasing_candle", + "commit_message": "switch to black .22", + "code": "def make_increasing_candle(open, high, low, close, dates, **kwargs):\n \n increase_x, increase_y = _Candlestick(\n open, high, low, close, dates, **kwargs\n 
).get_candle_increase()\n\n if \"line\" in kwargs:\n kwargs.setdefault(\"fillcolor\", kwargs[\"line\"][\"color\"])\n else:\n kwargs.setdefault(\"fillcolor\", _DEFAULT_INCREASING_COLOR)\n if \"name\" in kwargs:\n kwargs.setdefault(\"showlegend\", True)\n else:\n kwargs.setdefault(\"showlegend\", False)\n kwargs.setdefault(\"name\", \"Increasing\")\n kwargs.setdefault(\"line\", dict(color=_DEFAULT_INCREASING_COLOR))\n\n candle_incr_data = dict(\n type=\"box\",\n x=increase_x,\n y=increase_y,\n whiskerwidth=0,\n boxpoints=False,\n **kwargs,\n )\n\n return [candle_incr_data]\n\n", + "url": "https://github.com/plotly/plotly.py.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 165, + "n_words": 52, + "vocab_size": 41, + "complexity": 3, + "nloc": 23, + "token_counts": 145, + "n_ast_nodes": 237, + "n_identifiers": 21, + "d_id": 57816, + "documentation": { + "docstring": "\n Makes boxplot trace for increasing candlesticks\n\n _make_increasing_candle() and _make_decreasing_candle separate the\n increasing traces from the decreasing traces so kwargs (such as\n color) can be passed separately to increasing or decreasing traces\n when direction is set to 'increasing' or 'decreasing' in\n FigureFactory.create_candlestick()\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. Default: None\n :param kwargs: kwargs to be passed to increasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (list) candle_incr_data: list of the box trace for\n increasing candlesticks.\n ", + "n_words": 92, + "vocab_size": 58, + "n_whitespaces": 149, + "language": "en" + } + }, + { + "id": 97246, + "commit_id": "6c49c2ff46496809d6620ac3746262c66f02142e", + "repo": "sentry", + "path": "src/sentry/snuba/discover.py", + "file_name": "discover.py", + "fun_name": "normalize_span_histogram_resutls", + "commit_message": "ref(spans): Normalize exclusive time histogram results (#32762)\n\n* ref(spans): Normalize exclusive time histogram results\r\n\r\n* test normalized data", + "code": "def normalize_span_histogram_resutls(span, histogram_params, results):\n \n\n histogram_column = get_span_histogram_column(span, histogram_params)\n bin_name = get_function_alias(histogram_column)\n\n # zerofill and rename the columns while making sure to adjust for precision\n bucket_map = {}\n for row in results[\"data\"]:\n # we expect the bin the be an integer, this is because all floating\n # point values are rounded during the calculation\n bucket = int(row[bin_name])\n bucket_map[bucket] = row[\"count\"]\n\n new_data = []\n for i in range(histogram_params.num_buckets):\n bucket = histogram_params.start_offset + histogram_params.bucket_size * i\n row = {\"bin\": bucket, \"count\": bucket_map.get(bucket, 0)}\n if histogram_params.multiplier > 1:\n row[\"bin\"] /= float(histogram_params.multiplier)\n new_data.append(row)\n\n return new_data\n\n", + "url": "https://github.com/getsentry/sentry.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 184, + "n_words": 90, + "vocab_size": 71, + "complexity": 4, + "nloc": 15, + "token_counts": 124, + "n_ast_nodes": 203, + "n_identifiers": 22, + "d_id": 19394, + "documentation": { + "docstring": "\n Normalizes the span histogram results by renaming the columns to key and bin\n and make sure to zerofill any missing values.\n\n :param [Span] span: The span 
for which you want to generate the\n histograms for.\n :param HistogramParams histogram_params: The histogram parameters used.\n :param any results: The results from the histogram query that may be missing\n bins and needs to be normalized.\n ", + "n_words": 61, + "vocab_size": 42, + "n_whitespaces": 94, + "language": "en" + } + }, + { + "id": 156742, + "commit_id": "2820bae493a49cb1d0a6e376985c5473b8f04fa8", + "repo": "dask", + "path": "dask/array/core.py", + "file_name": "core.py", + "fun_name": "round", + "commit_message": "Don't include docs in ``Array`` methods, just refer to module docs (#9244)\n\nCo-authored-by: James Bourbeau ", + "code": "def round(self, decimals=0):\n \n from dask.array.routines import round\n\n return round(self, decimals=decimals)\n", + "url": "https://github.com/dask/dask.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 31, + "n_words": 10, + "vocab_size": 9, + "complexity": 1, + "nloc": 3, + "token_counts": 27, + "n_ast_nodes": 42, + "n_identifiers": 6, + "d_id": 36752, + "documentation": { + "docstring": "Return array with each element rounded to the given number of decimals.\n\n Refer to :func:`dask.array.round` for full documentation.\n\n See Also\n --------\n dask.array.round : equivalent function\n ", + "n_words": 25, + "vocab_size": 24, + "n_whitespaces": 60, + "language": "en" + } + }, + { + "id": 164632, + "commit_id": "c055dc4e6be9fc1b68d873a1ace286322dadd5e1", + "repo": "pandas", + "path": "pandas/tests/io/test_stata.py", + "file_name": "test_stata.py", + "fun_name": "test_encoding_latin1_118", + "commit_message": "TST: Don't use autouse fixture in test_stata (#45831)", + "code": "def test_encoding_latin1_118(self, datapath):\n # GH 25960\n msg = \n with tm.assert_produces_warning(UnicodeWarning) as w:\n encoded = read_stata(\n datapath(\"io\", \"data\", \"stata\", \"stata1_encoding_118.dta\")\n )\n assert len(w) == 151\n assert w[0].message.args[0] == msg\n\n expected = DataFrame([[\"Düsseldorf\"]] * 151, columns=[\"kreis1849\"])\n tm.assert_frame_equal(encoded, expected)\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 130, + "n_words": 36, + "vocab_size": 31, + "complexity": 1, + "nloc": 14, + "token_counts": 82, + "n_ast_nodes": 141, + "n_identifiers": 17, + "d_id": 39577, + "documentation": { + "docstring": "\nOne or more strings in the dta file could not be decoded using utf-8, and\nso the fallback encoding of latin-1 is being used. This can happen when a file\nhas been incorrectly encoded by Stata or some other software. 
You should verify\nthe string values returned are correct.", + "n_words": 49, + "vocab_size": 45, + "n_whitespaces": 46, + "language": "en" + } + }, + { + "id": 107364, + "commit_id": "6010bb43ed01c48c7c403569dd210490b236a853", + "repo": "matplotlib", + "path": "lib/matplotlib/colorbar.py", + "file_name": "colorbar.py", + "fun_name": "locator", + "commit_message": "MNT: make colorbars locators and formatters properties", + "code": "def locator(self, loc):\n \n self._long_axis().set_major_locator(loc)\n self._locator = loc\n", + "url": "https://github.com/matplotlib/matplotlib.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 28, + "n_words": 7, + "vocab_size": 7, + "complexity": 1, + "nloc": 3, + "token_counts": 23, + "n_ast_nodes": 40, + "n_identifiers": 6, + "d_id": 22709, + "documentation": { + "docstring": "\n Set the major locator being used for colorbar\n ", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 23, + "language": "en" + } + }, + { + "id": 314737, + "commit_id": "26a85c6644991f626ccce62c05665095c2577234", + "repo": "core", + "path": "tests/helpers/test_entity.py", + "file_name": "test_entity.py", + "fun_name": "test_removing_entity_unavailable", + "commit_message": "Add Entity.has_entity_name attribute (#73217)", + "code": "async def test_removing_entity_unavailable(hass):\n \n entry = er.RegistryEntry(\n entity_id=\"hello.world\",\n unique_id=\"test-unique-id\",\n platform=\"test-platform\",\n disabled_by=None,\n )\n\n ent = entity.Entity()\n ent.hass = hass\n ent.entity_id = \"hello.world\"\n ent.registry_entry = entry\n ent.async_write_ha_state()\n\n state = hass.states.get(\"hello.world\")\n assert state is not None\n assert state.state == STATE_UNKNOWN\n\n await ent.async_remove()\n\n state = hass.states.get(\"hello.world\")\n assert state is not None\n assert state.state == STATE_UNAVAILABLE\n\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 123, + "n_words": 50, + "vocab_size": 31, + "complexity": 1, + "nloc": 19, + "token_counts": 104, + "n_ast_nodes": 178, + "n_identifiers": 20, + "d_id": 113341, + "documentation": { + "docstring": "Test removing an entity that is still registered creates an unavailable state.", + "n_words": 12, + "vocab_size": 11, + "n_whitespaces": 11, + "language": "en" + } + }, + { + "id": 80597, + "commit_id": "604cbc17376620dc67df35386421835d43732a4e", + "repo": "awx", + "path": "awx/main/managers.py", + "file_name": "managers.py", + "fun_name": "capacity_values", + "commit_message": "Consume control capacity (#11665)\n\n* Select control node before start task\r\n\r\nConsume capacity on control nodes for controlling tasks and consider\r\nremainging capacity on control nodes before selecting them.\r\n\r\nThis depends on the requirement that control and hybrid nodes should all\r\nbe in the instance group named 'controlplane'. Many tests do not satisfy that\r\nrequirement. 
I'll update the tests in another commit.\r\n\r\n* update tests to use controlplane\r\n\r\nWe don't start any tasks if we don't have a controlplane instance group\r\n\r\nDue to updates to fixtures, update tests to set node type and capacity\r\nexplicitly so they get expected result.\r\n\r\n* Fixes for accounting of control capacity consumed\r\n\r\nUpdate method is used to account for currently consumed capacity for\r\ninstance groups in the in-memory capacity tracking data structure we initialize in\r\nafter_lock_init and then update via calculate_capacity_consumed (both in\r\ntask_manager.py)\r\n\r\nAlso update fit_task_to_instance to consider control impact on instances\r\n\r\nTrust that these functions do the right thing looking for a\r\nnode with capacity, and cut out redundant check for the whole group's\r\ncapacity per Alan's reccomendation.\r\n\r\n* Refactor now redundant code\r\n\r\nDeal with control type tasks before we loop over the preferred instance\r\ngroups, which cuts out the need for some redundant logic.\r\n\r\nAlso, fix a bug where I was missing assigning the execution node in one case!\r\n\r\n* set job explanation on tasks that need capacity\r\n\r\nmove the job explanation for jobs that need capacity to a function\r\nso we can re-use it in the three places we need it.\r\n\r\n* project updates always run on the controlplane\r\n\r\nInstance group ordering makes no sense on project updates because they\r\nalways need to run on the control plane.\r\n\r\nAlso, since hybrid nodes should always run the control processes for the\r\njobs running on them as execution nodes, account for this when looking for a\r\nexecution node.\r\n\r\n* fix misleading message\r\n\r\nthe variables and wording were both misleading, fix to be more accurate\r\ndescription in the two different cases where this log may be emitted.\r\n\r\n* use settings correctly\r\n\r\nuse settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME instead of a hardcoded\r\nname\r\ncache the controlplane_ig object during the after lock init to avoid\r\nan uneccesary query\r\neliminate mistakenly duplicated AWX_CONTROL_PLANE_TASK_IMPACT and use\r\nonly AWX_CONTROL_NODE_TASK_IMPACT\r\n\r\n* add test for control capacity consumption\r\n\r\nadd test to verify that when there are 2 jobs and only capacity for one\r\nthat one will move into waiting and the other stays in pending\r\n\r\n* add test for hybrid node capacity consumption\r\n\r\nassert that the hybrid node is used for both control and execution and\r\ncapacity is deducted correctly\r\n\r\n* add test for task.capacity_type = control\r\n\r\nTest that control type tasks have the right capacity consumed and\r\nget assigned to the right instance group\r\n\r\nAlso fix lint in the tests\r\n\r\n* jobs_running not accurate for control nodes\r\n\r\nWe can either NOT use \"idle instances\" for control nodes, or we need\r\nto update the jobs_running property on the Instance model to count\r\njobs where the node is the controller_node.\r\n\r\nI didn't do that because it may be an expensive query, and it would be\r\nhard to make it match with jobs_running on the InstanceGroup which\r\nfilters on tasks assigned to the instance group.\r\n\r\nThis change chooses to stop considering \"idle\" control nodes an option,\r\nsince we can't acurrately identify them.\r\n\r\nThe way things are without any change, is we are continuing to over consume capacity on control nodes\r\nbecause this method sees all control nodes as \"idle\" at the beginning\r\nof the task manager run, and then only counts jobs started 
in that run\r\nin the in-memory tracking. So jobs which last over a number of task\r\nmanager runs build up consuming capacity, which is accurately reported\r\nvia Instance.consumed_capacity\r\n\r\n* Reduce default task impact for control nodes\r\n\r\nThis is something we can experiment with as far as what users\r\nwant at install time, but start with just 1 for now.\r\n\r\n* update capacity docs\r\n\r\nDescribe usage of the new setting and the concept of control impact.\r\n\r\nCo-authored-by: Alan Rominger \r\nCo-authored-by: Rebeccah ", + "code": "def capacity_values(self, qs=None, tasks=None, breakdown=False, graph=None):\n \n if qs is None: # Optionally BYOQS - bring your own queryset\n qs = self.all().prefetch_related('instances')\n instance_ig_mapping, ig_ig_mapping = self.capacity_mapping(qs=qs)\n\n if tasks is None:\n tasks = self.model.unifiedjob_set.related.related_model.objects.filter(status__in=('running', 'waiting'))\n\n if graph is None:\n graph = {group.name: {} for group in qs}\n for group_name in graph:\n self.zero_out_group(graph, group_name, breakdown)\n for t in tasks:\n # TODO: dock capacity for isolated job management tasks running in queue\n impact = t.task_impact\n control_groups = []\n if t.controller_node:\n control_groups = instance_ig_mapping.get(t.controller_node, [])\n if not control_groups:\n logger.warn(f\"No instance group found for {t.controller_node}, capacity consumed may be innaccurate.\")\n\n if t.status == 'waiting' or (not t.execution_node and not t.is_container_group_task):\n # Subtract capacity from any peer groups that share instances\n if not t.instance_group:\n impacted_groups = []\n elif t.instance_group.name not in ig_ig_mapping:\n # Waiting job in group with 0 capacity has no collateral impact\n impacted_groups = [t.instance_group.name]\n else:\n impacted_groups = ig_ig_mapping[t.instance_group.name]\n for group_name in impacted_groups:\n if group_name not in graph:\n self.zero_out_group(graph, group_name, breakdown)\n graph[group_name]['consumed_capacity'] += impact\n capacity_type = get_capacity_type(t)\n graph[group_name][f'consumed_{capacity_type}_capacity'] += impact\n if breakdown:\n graph[group_name]['committed_capacity'] += impact\n for group_name in control_groups:\n if group_name not in graph:\n self.zero_out_group(graph, group_name, breakdown)\n graph[group_name][f'consumed_control_capacity'] += settings.AWX_CONTROL_NODE_TASK_IMPACT\n if breakdown:\n graph[group_name]['committed_capacity'] += settings.AWX_CONTROL_NODE_TASK_IMPACT\n elif t.status == 'running':\n # Subtract capacity from all groups that contain the instance\n if t.execution_node not in instance_ig_mapping:\n if not t.is_container_group_task:\n logger.warning('Detected %s running inside lost instance, ' 'may still be waiting for reaper.', t.log_format)\n if t.instance_group:\n impacted_groups = [t.instance_group.name]\n else:\n impacted_groups = []\n else:\n impacted_groups = instance_ig_mapping[t.execution_node]\n\n for group_name in impacted_groups:\n if group_name not in graph:\n self.zero_out_group(graph, group_name, breakdown)\n graph[group_name]['consumed_capacity'] += impact\n capacity_type = get_capacity_type(t)\n graph[group_name][f'consumed_{capacity_type}_capacity'] += impact\n if breakdown:\n graph[group_name]['running_capacity'] += impact\n for group_name in control_groups:\n if group_name not in graph:\n self.zero_out_group(graph, group_name, breakdown)\n graph[group_name][f'consumed_control_capacity'] += settings.AWX_CONTROL_NODE_TASK_IMPACT\n if 
breakdown:\n graph[group_name]['running_capacity'] += settings.AWX_CONTROL_NODE_TASK_IMPACT\n else:\n logger.error('Programming error, %s not in [\"running\", \"waiting\"]', t.log_format)\n return graph\n", + "url": "https://github.com/ansible/awx.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 18, + "n_whitespaces": 1412, + "n_words": 296, + "vocab_size": 129, + "complexity": 30, + "nloc": 65, + "token_counts": 503, + "n_ast_nodes": 817, + "n_identifiers": 42, + "d_id": 17078, + "documentation": { + "docstring": "\n Returns a dictionary of capacity values for all IGs\n ", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 24, + "language": "en" + } + }, + { + "id": 268361, + "commit_id": "4260b71cc77b7a44e061668d0d408d847f550156", + "repo": "ansible", + "path": "test/units/plugins/test_plugins.py", + "file_name": "test_plugins.py", + "fun_name": "test_all_no_duplicate_names", + "commit_message": "refactor and fixes for doc parsing (#77719)\n\n* refactor and remove redundant code in documentation\r\n\r\n allow location and building api to be more accessible\r\n fix issues with displaying ansible.legacy and ansible.builtin\r\n ensure we don't x2 process tokens (some modules reference them also) fixes #77764\r\n move to constants vs hardcoded\r\n more informative errors and comments\r\n now have actual filter/test plugins, which expose the filter/test functions\r\n moved filter/test loading/finding logic into jinja2pluginloader, removed dupe implementations\r\n added tests for case in which we unique by basename when listing\r\n\r\nUpdate lib/ansible/utils/plugin_docs.py\r\nCo-authored-by: Sloane Hertel <19572925+s-hertel@users.noreply.github.com>", + "code": "def test_all_no_duplicate_names(self, gp_mock, glob_mock):\n \n\n fixture_path = os.path.join(os.path.dirname(__file__), 'loader_fixtures')\n\n gp_mock.return_value = [\n fixture_path,\n '/path/to'\n ]\n\n glob_mock.glob.side_effect = [\n [os.path.join(fixture_path, 'import_fixture.py')],\n ['/path/to/import_fixture.py']\n ]\n\n pl = PluginLoader('test', '', 'test', 'test_plugins')\n # Aside from needing ``list()`` so we can do a len, ``PluginLoader.all`` returns a generator\n # so ``list()`` actually causes ``PluginLoader.all`` to run.\n plugins = list(pl.all())\n self.assertEqual(len(plugins), 1)\n\n self.assertIn(os.path.join(fixture_path, 'import_fixture.py'), pl._module_cache)\n self.assertNotIn('/path/to/import_fixture.py', pl._module_cache)\n", + "url": "https://github.com/ansible/ansible.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 195, + "n_words": 60, + "vocab_size": 48, + "complexity": 1, + "nloc": 15, + "token_counts": 124, + "n_ast_nodes": 209, + "n_identifiers": 23, + "d_id": 79491, + "documentation": { + "docstring": "\n This test goes along with ``test__load_module_source_no_duplicate_names``\n and ensures that we ignore duplicate imports on multiple paths\n ", + "n_words": 16, + "vocab_size": 16, + "n_whitespaces": 38, + "language": "en" + } + }, + { + "id": 268987, + "commit_id": "119cd4655d01570a70c70879dff4461ea46161bf", + "repo": "keras", + "path": "keras/utils/metrics_utils.py", + "file_name": "metrics_utils.py", + "fun_name": "binary_matches", + "commit_message": "Added util metric method for binary_matches. 
Decoupled from public metric binarry_acc", + "code": "def binary_matches(y_true, y_pred, threshold=0.5):\n \n y_pred = tf.convert_to_tensor(y_pred)\n threshold = tf.cast(threshold, y_pred.dtype)\n y_pred = tf.cast(y_pred > threshold, y_pred.dtype)\n return tf.cast(tf.equal(y_true, y_pred), tf.int8)", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 26, + "n_words": 21, + "vocab_size": 17, + "complexity": 1, + "nloc": 5, + "token_counts": 66, + "n_ast_nodes": 98, + "n_identifiers": 10, + "d_id": 79806, + "documentation": { + "docstring": "Creates int Tensor, 1 for label-prediction match, 0 for mismatch.\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n threshold: (Optional) Float representing the threshold for deciding whether\n prediction values are 1 or 0.\n\n Returns:\n Binary matches. shape = `[batch_size, d0, .. dN]`\n ", + "n_words": 55, + "vocab_size": 40, + "n_whitespaces": 75, + "language": "en" + } + }, + { + "id": 197113, + "commit_id": "cba899d4137b0b65f6850120ee42cd4fcd4f9dbf", + "repo": "sympy", + "path": "sympy/tensor/tensor.py", + "file_name": "tensor.py", + "fun_name": "get_matrix", + "commit_message": "Update the various tensor deprecations", + "code": "def get_matrix(self):\n \n from sympy.matrices.dense import Matrix\n deprecate_data()\n with ignore_warnings(SymPyDeprecationWarning):\n if 0 < self.rank <= 2:\n rows = self.data.shape[0]\n columns = self.data.shape[1] if self.rank == 2 else 1\n if self.rank == 2:\n mat_list = [] * rows\n for i in range(rows):\n mat_list.append([])\n for j in range(columns):\n mat_list[i].append(self[i, j])\n else:\n mat_list = [None] * rows\n for i in range(rows):\n mat_list[i] = self[i]\n return Matrix(mat_list)\n else:\n raise NotImplementedError(\n \"missing multidimensional reduction to matrix.\")\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 18, + "n_whitespaces": 401, + "n_words": 70, + "vocab_size": 49, + "complexity": 7, + "nloc": 21, + "token_counts": 148, + "n_ast_nodes": 235, + "n_identifiers": 20, + "d_id": 48346, + "documentation": { + "docstring": "\n DEPRECATED: do not use.\n\n Returns ndarray components data as a matrix, if components data are\n available and ndarray dimension does not exceed 2.\n ", + "n_words": 23, + "vocab_size": 19, + "n_whitespaces": 52, + "language": "en" + } + }, + { + "id": 241712, + "commit_id": "b56d8677ad0ff8513e566334f4a78a24b88480c3", + "repo": "lightning", + "path": "tests/callbacks/test_device_stats_monitor.py", + "file_name": "test_device_stats_monitor.py", + "fun_name": "test_device_stats_gpu_from_torch", + "commit_message": "Update test_pruning.py to use `devices` instead of `gpus` or `ipus` (#11339)", + "code": "def test_device_stats_gpu_from_torch(tmpdir):\n \n model = BoringModel()\n device_stats = DeviceStatsMonitor()\n", + "url": "https://github.com/Lightning-AI/lightning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 17, + "n_words": 8, + "vocab_size": 7, + "complexity": 1, + "nloc": 19, + "token_counts": 82, + "n_ast_nodes": 31, + "n_identifiers": 6, + "d_id": 69665, + "documentation": { + "docstring": "Test GPU stats are logged using a logger with Pytorch >= 1.8.0.", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 11, + "language": 
"en" + } + }, + { + "id": 21185, + "commit_id": "4b996c0fa85824b323ad9eff3364dbe2213ebb4c", + "repo": "pipenv", + "path": "pipenv/environment.py", + "file_name": "environment.py", + "fun_name": "expand_egg_links", + "commit_message": "Convert type comments to type annotations", + "code": "def expand_egg_links(self) -> None:\n \n prefixes = [\n Path(prefix)\n for prefix in self.base_paths[\"libdirs\"].split(os.pathsep)\n if vistir.path.is_in_path(prefix, self.prefix.as_posix())\n ]\n for loc in prefixes:\n if not loc.exists():\n continue\n for pth in loc.iterdir():\n if not pth.suffix == \".egg-link\":\n continue\n contents = [\n vistir.path.normalize_path(line.strip())\n for line in pth.read_text().splitlines()\n ]\n pth.write_text(\"\\n\".join(contents))\n", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 259, + "n_words": 44, + "vocab_size": 31, + "complexity": 8, + "nloc": 21, + "token_counts": 120, + "n_ast_nodes": 200, + "n_identifiers": 26, + "d_id": 3716, + "documentation": { + "docstring": "\n Expand paths specified in egg-link files to prevent pip errors during\n reinstall\n ", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 34, + "language": "en" + } + }, + { + "id": 205712, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/db/models/manager.py", + "file_name": "manager.py", + "fun_name": "deconstruct", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def deconstruct(self):\n \n qs_class = self._queryset_class\n if getattr(self, \"_built_with_as_manager\", False):\n # using MyQuerySet.as_manager()\n return (\n True, # as_manager\n None, # manager_class\n \"%s.%s\" % (qs_class.__module__, qs_class.__name__), # qs_class\n None, # args\n None, # kwargs\n )\n else:\n module_name = self.__module__\n name = self.__class__.__name__\n # Make sure it's actually there and not an inner class\n module = import_module(module_name)\n if not hasattr(module, name):\n raise ValueError(\n \"Could not find manager %s in %s.\\n\"\n \"Please note that you need to inherit from managers you \"\n \"dynamically generated with 'from_queryset()'.\"\n % (name, module_name)\n )\n return (\n False, # as_manager\n \"%s.%s\" % (module_name, name), # manager_class\n None, # qs_class\n self._constructor_args[0], # args\n self._constructor_args[1], # kwargs\n )\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 511, + "n_words": 107, + "vocab_size": 73, + "complexity": 3, + "nloc": 28, + "token_counts": 115, + "n_ast_nodes": 192, + "n_identifiers": 15, + "d_id": 51170, + "documentation": { + "docstring": "\n Return a 5-tuple of the form (as_manager (True), manager_class,\n queryset_class, args, kwargs).\n\n Raise a ValueError if the manager is dynamically generated.\n ", + "n_words": 21, + "vocab_size": 19, + "n_whitespaces": 50, + "language": "en" + } + }, + { + "id": 168085, + "commit_id": "62a69beddbedde349891378992c902c0b9341a9f", + "repo": "pandas", + "path": "pandas/core/arrays/interval.py", + "file_name": "interval.py", + "fun_name": "right", + "commit_message": "DOC: Add numpydoc SS06 validation (#47885)", + "code": "def right(self):\n \n from pandas import Index\n\n return Index(self._right, copy=False)\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + 
"n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 30, + "n_words": 9, + "vocab_size": 9, + "complexity": 1, + "nloc": 3, + "token_counts": 21, + "n_ast_nodes": 36, + "n_identifiers": 6, + "d_id": 40192, + "documentation": { + "docstring": "\n Return the right endpoints of each Interval in the IntervalArray as an Index.\n ", + "n_words": 13, + "vocab_size": 12, + "n_whitespaces": 28, + "language": "en" + } + }, + { + "id": 63010, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pep517/wrappers.py", + "file_name": "wrappers.py", + "fun_name": "get_requires_for_build_sdist", + "commit_message": "upd; format", + "code": "def get_requires_for_build_sdist(self, config_settings=None):\n \n return self._call_hook('get_requires_for_build_sdist', {\n 'config_settings': config_settings\n })\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 41, + "n_words": 9, + "vocab_size": 9, + "complexity": 1, + "nloc": 4, + "token_counts": 23, + "n_ast_nodes": 41, + "n_identifiers": 4, + "d_id": 13098, + "documentation": { + "docstring": "Identify packages required for building a wheel\n\n Returns a list of dependency specifications, e.g.::\n\n [\"setuptools >= 26\"]\n\n This does not include requirements specified in pyproject.toml.\n It returns the result of calling the equivalently named hook in a\n subprocess.\n ", + "n_words": 38, + "vocab_size": 33, + "n_whitespaces": 84, + "language": "en" + } + }, + { + "id": 148044, + "commit_id": "d96ac251d7c9d12fadedfdfd903dc393f5bae217", + "repo": "ray", + "path": "python/ray/ml/checkpoint.py", + "file_name": "checkpoint.py", + "fun_name": "as_directory", + "commit_message": "[air] Add `Checkpoint.as_directory()` for efficient checkpoint fs processing (#23908)\n\nThis PR adds a `Checkpoint_as_directory()` context manager that either returns the local path (if checkpoint is already a directory) or a temporary directory path containing the checkpoint data, which is cleaned up after use. The path should be considered as a read-only source for loading data from the checkpoint.\r\n\r\nA common use case for processing checkpoint data is to convert it into a directory with `Checkpoint.to_directory()` and then do some read-only processing (e.g. restoring a ML model).\r\n\r\nThis process has two flaws: First, `to_directory()` creates a temporary directory that has to be explicitly cleaned up by the user after use. Secondly, if the checkpoint is already a directory checkpoint, it is copied over, which is inefficient for large checkpoints (e.g. huggingface models) and then even more prone to unwanted side effects if not cleaned up properly. \r\n\r\nWith this context manager that effectively returns a directory that is to be used as a read-only data source, we can avoid manual cleaning up and unnecessary data copies (or avoid internal inspection as e.g. 
in https://github.com/ray-project/ray/pull/23876/files#diff-47db2f054ca359879f77306e7b054dd8b780aab994961e3b4911330ae15eeae3R57-R60)\r\n\r\nSee also discussion in https://github.com/ray-project/ray/pull/23850/files#r850036905", + "code": "def as_directory(self) -> Iterator[str]:\n \n if self._local_path:\n yield self._local_path\n else:\n temp_dir = self.to_directory()\n yield temp_dir\n shutil.rmtree(temp_dir, ignore_errors=True)\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 81, + "n_words": 16, + "vocab_size": 14, + "complexity": 2, + "nloc": 29, + "token_counts": 41, + "n_ast_nodes": 70, + "n_identifiers": 10, + "d_id": 34158, + "documentation": { + "docstring": "Return checkpoint directory path in a context.\n\n This function makes checkpoint data available as a directory while avoiding\n unnecessary copies and left-over temporary data.\n\n If the checkpoint is already a directory checkpoint, it will return\n the existing path. If it is not, it will create a temporary directory,\n which will be deleted after the context is exited.\n\n Users should treat the returned checkpoint directory as read-only and avoid\n changing any data within it, as it might get deleted when exiting the context.\n\n Example:\n\n with checkpoint.as_directory() as checkpoint_dir:\n # Do some read-only processing of files within checkpoint_dir\n pass\n\n # At this point, if a temporary directory was created, it will have\n # been deleted.\n\n ", + "n_words": 113, + "vocab_size": 75, + "n_whitespaces": 239, + "language": "en" + } + }, + { + "id": 64242, + "commit_id": "ce0b84f54d495fc78a6792a9b05d0eb1dc799ed2", + "repo": "erpnext", + "path": "erpnext/stock/doctype/delivery_note/delivery_note.py", + "file_name": "delivery_note.py", + "fun_name": "update_billed_amount_based_on_so", + "commit_message": "refactor: use frappe.qb instead of sql\n\n(cherry picked from commit 0a9ec9f591f8b4d0e630a3c902b69c9996f080dd)", + "code": "def update_billed_amount_based_on_so(so_detail, update_modified=True):\n\tfrom frappe.query_builder.functions import Sum\n\n\t# Billed against Sales Order directly\n\tsi = frappe.qb.DocType(\"Sales Invoice\").as_(\"si\")\n\tsi_item = frappe.qb.DocType(\"Sales Invoice Item\").as_(\"si_item\")\n\tsum_amount = Sum(si_item.amount).as_(\"amount\")\n\n\tbilled_against_so = frappe.qb.from_(si).from_(si_item).select(sum_amount).where(\n\t\t(si_item.parent == si.name) &\n\t\t(si_item.so_detail == so_detail) &\n\t\t((si_item.dn_detail.isnull()) | (si_item.dn_detail == '')) &\n\t\t(si_item.docstatus == 1) &\n\t\t(si.update_stock == 0)\n\t).run()\n\tbilled_against_so = billed_against_so and billed_against_so[0][0] or 0\n\n\t# Get all Delivery Note Item rows against the Sales Order Item row\n\n\tdn = frappe.qb.DocType(\"Delivery Note\").as_(\"dn\")\n\tdn_item = frappe.qb.DocType(\"Delivery Note Item\").as_(\"dn_item\")\n\n\tdn_details = frappe.qb.from_(dn).from_(dn_item).select(dn_item.name, dn_item.amount, dn_item.si_detail, dn_item.parent, dn_item.stock_qty, dn_item.returned_qty).where(\n\t\t(dn.name == dn_item.parent) &\n\t\t(dn_item.so_detail == so_detail) &\n\t\t(dn.docstatus == 1) &\n\t\t(dn.is_return == 0)\n\t).orderby(\n\t\tdn.posting_date, dn.posting_time, dn.name\n\t).run(as_dict=True)\n\n\tupdated_dn = []\n\tfor dnd in dn_details:\n\t\tbilled_amt_agianst_dn = 0\n\n\t\t# If delivered against Sales Invoice\n\t\tif dnd.si_detail:\n\t\t\tbilled_amt_agianst_dn = 
flt(dnd.amount)\n\t\t\tbilled_against_so -= billed_amt_agianst_dn\n\t\telse:\n\t\t\t# Get billed amount directly against Delivery Note\n\t\t\tbilled_amt_agianst_dn = frappe.db.sql(, dnd.name)\n\t\t\tbilled_amt_agianst_dn = billed_amt_agianst_dn and billed_amt_agianst_dn[0][0] or 0\n\n\t\t# Distribute billed amount directly against SO between DNs based on FIFO\n\t\tif billed_against_so and billed_amt_agianst_dn < dnd.amount:\n\t\t\tif dnd.returned_qty:\n\t\t\t\tpending_to_bill = flt(dnd.amount) * (dnd.stock_qty - dnd.returned_qty) / dnd.stock_qty\n\t\t\telse:\n\t\t\t\tpending_to_bill = flt(dnd.amount)\n\t\t\tpending_to_bill -= billed_amt_agianst_dn\n\t\t\tif pending_to_bill <= billed_against_so:\n\t\t\t\tbilled_amt_agianst_dn += pending_to_bill\n\t\t\t\tbilled_against_so -= pending_to_bill\n\t\t\telse:\n\t\t\t\tbilled_amt_agianst_dn += billed_against_so\n\t\t\t\tbilled_against_so = 0\n\n\t\tfrappe.db.set_value(\"Delivery Note Item\", dnd.name, \"billed_amt\", billed_amt_agianst_dn, update_modified=update_modified)\n\n\t\tupdated_dn.append(dnd.parent)\n\n\treturn updated_dn\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 19, + "n_whitespaces": 162, + "n_words": 214, + "vocab_size": 120, + "complexity": 11, + "nloc": 48, + "token_counts": 440, + "n_ast_nodes": 708, + "n_identifiers": 45, + "d_id": 13585, + "documentation": { + "docstring": "select sum(amount) from `tabSales Invoice Item`\n\t\t\t\twhere dn_detail=%s and docstatus=1", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 151551, + "commit_id": "2b6d00dde449934db8789c860d5e0e9dc9c528ab", + "repo": "freqtrade", + "path": "freqtrade/rpc/api_server/ws/channel.py", + "file_name": "channel.py", + "fun_name": "send", + "commit_message": "initial channel api change", + "code": "async def send(self, data) -> bool:\n \n try:\n await asyncio.wait_for(\n self.queue.put(data),\n timeout=self.drain_timeout\n )\n return True\n except asyncio.TimeoutError:\n return False\n", + "url": "https://github.com/freqtrade/freqtrade.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 113, + "n_words": 18, + "vocab_size": 17, + "complexity": 2, + "nloc": 13, + "token_counts": 41, + "n_ast_nodes": 68, + "n_identifiers": 11, + "d_id": 35044, + "documentation": { + "docstring": "\n Add the data to the queue to be sent.\n :returns: True if data added to queue, False otherwise\n ", + "n_words": 18, + "vocab_size": 14, + "n_whitespaces": 40, + "language": "en" + } + }, + { + "id": 248060, + "commit_id": "8a87b4435a736cd42454cad7e57b65ec911f01fa", + "repo": "synapse", + "path": "tests/storage/databases/main/test_events_worker.py", + "file_name": "test_events_worker.py", + "fun_name": "test_second_get_event_cancelled", + "commit_message": "Handle cancellation in `EventsWorkerStore._get_events_from_cache_or_db` (#12529)\n\nMultiple calls to `EventsWorkerStore._get_events_from_cache_or_db` can\r\nreuse the same database fetch, which is initiated by the first call.\r\nEnsure that cancelling the first call doesn't cancel the other calls\r\nsharing the same database fetch.\r\n\r\nSigned-off-by: Sean Quah ", + "code": "def test_second_get_event_cancelled(self):\n \n with self.blocking_get_event_calls() as (unblock, get_event1, get_event2):\n # Cancel the second `get_event` call.\n get_event2.cancel()\n # The first `get_event` call must not be cancelled.\n 
self.assertNoResult(get_event1)\n # The second `get_event` call gets cancelled immediately.\n exc = self.get_failure(get_event2, CancelledError).value\n self.assertIsInstance(exc, CancelledError)\n\n # Unblock the database fetch.\n unblock.callback(None)\n # The first `get_event` call should complete successfully.\n self.get_success(get_event1)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 189, + "n_words": 54, + "vocab_size": 40, + "complexity": 1, + "nloc": 8, + "token_counts": 64, + "n_ast_nodes": 112, + "n_identifiers": 15, + "d_id": 72077, + "documentation": { + "docstring": "Test cancellation of the second `get_event` call sharing a database fetch.", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 104416, + "commit_id": "e35be138148333078284b942ccc9ed7b1d826f97", + "repo": "datasets", + "path": "src/datasets/table.py", + "file_name": "table.py", + "fun_name": "remove_column", + "commit_message": "Update docs to new frontend/UI (#3690)\n\n* WIP: update docs to new UI\r\n\r\n* make style\r\n\r\n* Rm unused\r\n\r\n* inject_arrow_table_documentation __annotations__\r\n\r\n* hasattr(arrow_table_method, \"__annotations__\")\r\n\r\n* Update task_template.rst\r\n\r\n* Codeblock PT-TF-SPLIT\r\n\r\n* Convert loading scripts\r\n\r\n* Convert docs to mdx\r\n\r\n* Fix mdx\r\n\r\n* Add \r\n\r\n* Convert mdx tables\r\n\r\n* Fix codeblock\r\n\r\n* Rm unneded hashlinks\r\n\r\n* Update index.mdx\r\n\r\n* Redo dev change\r\n\r\n* Rm circle ci `build_doc` & `deploy_doc`\r\n\r\n* Rm unneeded files\r\n\r\n* Update docs reamde\r\n\r\n* Standardize to `Example::`\r\n\r\n* mdx logging levels doc\r\n\r\n* Table properties inject_arrow_table_documentation\r\n\r\n* ``` to ```py mdx\r\n\r\n* Add Tips mdx\r\n\r\n* important,None -> \r\n\r\n* More misc\r\n\r\n* Center imgs\r\n\r\n* Update instllation page\r\n\r\n* `setup.py` docs section\r\n\r\n* Rm imgs since they are in hf.co\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* Update index mdx\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* just `Dataset` obj\r\n\r\n* Addedversion just italics\r\n\r\n* Update ReadInstruction doc example syntax\r\n\r\n* Change docstring for `prepare_for_task`\r\n\r\n* Chore\r\n\r\n* Remove `code` syntax from headings\r\n\r\n* Rm `code` syntax from headings\r\n\r\n* Hashlink backward compatability\r\n\r\n* S3FileSystem doc\r\n\r\n* S3FileSystem doc updates\r\n\r\n* index.mdx updates\r\n\r\n* Add darkmode gifs\r\n\r\n* Index logo img css classes\r\n\r\n* Index mdx dataset logo img size\r\n\r\n* Docs for DownloadMode class\r\n\r\n* Doc DownloadMode table\r\n\r\n* format docstrings\r\n\r\n* style\r\n\r\n* Add doc builder scripts (#3790)\r\n\r\n* add doc builder scripts\r\n\r\n* fix docker image\r\n\r\n* Docs new UI actions no self hosted (#3793)\r\n\r\n* No self hosted\r\n\r\n* replace doc injection by actual docstrings\r\n\r\n* Docstring formatted\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Mishig Davaadorj \r\n\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Mishig Davaadorj \r\n\r\n* Rm notebooks from docs actions since they dont exi\r\n\r\n* Update tsting branch\r\n\r\n* More docstring\r\n\r\n* Chore\r\n\r\n* bump up node version\r\n\r\n* bump up node\r\n\r\n* ``` -> ```py for 
audio_process.mdx\r\n\r\n* Update .github/workflows/build_documentation.yml\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Uodate dev doc build\r\n\r\n* remove run on PR\r\n\r\n* fix action\r\n\r\n* Fix gh doc workflow\r\n\r\n* forgot this change when merging master\r\n\r\n* Update build doc\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Lysandre Debut ", + "code": "def remove_column(self, i, *args, **kwargs):\n \n table = self.table.remove_column(i, *args, **kwargs)\n name = self.table.column_names[i]\n blocks = []\n for tables in self.blocks:\n blocks.append(\n [\n t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t\n for t in tables\n ]\n )\n return ConcatenationTable(table, blocks)\n", + "url": "https://github.com/huggingface/datasets.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 172, + "n_words": 40, + "vocab_size": 29, + "complexity": 4, + "nloc": 12, + "token_counts": 96, + "n_ast_nodes": 145, + "n_identifiers": 14, + "d_id": 21852, + "documentation": { + "docstring": "\n Create new Table with the indicated column removed.\n\n Args:\n i (:obj:`int`):\n Index of column to remove.\n\n Returns:\n :class:`datasets.table.Table`:\n New table without the column.\n ", + "n_words": 23, + "vocab_size": 21, + "n_whitespaces": 104, + "language": "en" + } + }, + { + "id": 250282, + "commit_id": "652d1669c5a103b1c20478770c4aaf18849c09a3", + "repo": "synapse", + "path": "tests/handlers/test_e2e_room_keys.py", + "file_name": "test_e2e_room_keys.py", + "fun_name": "test_upload_room_keys_bogus_version", + "commit_message": "Add missing type hints to tests.handlers. 
(#14680)\n\nAnd do not allow untyped defs in tests.handlers.", + "code": "def test_upload_room_keys_bogus_version(self) -> None:\n \n version = self.get_success(\n self.handler.create_version(\n self.local_user,\n {\n \"algorithm\": \"m.megolm_backup.v1\",\n \"auth_data\": \"first_version_auth_data\",\n },\n )\n )\n self.assertEqual(version, \"1\")\n\n e = self.get_failure(\n self.handler.upload_room_keys(self.local_user, \"bogus_version\", room_keys),\n SynapseError,\n )\n res = e.value.code\n self.assertEqual(res, 404)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 215, + "n_words": 32, + "vocab_size": 28, + "complexity": 1, + "nloc": 20, + "token_counts": 84, + "n_ast_nodes": 139, + "n_identifiers": 16, + "d_id": 73360, + "documentation": { + "docstring": "Check that we get a 404 on uploading keys when an nonexistent version\n is specified\n ", + "n_words": 15, + "vocab_size": 15, + "n_whitespaces": 29, + "language": "en" + } + }, + { + "id": 288603, + "commit_id": "47d0598e75487f63901931875f69f802a477df13", + "repo": "core", + "path": "tests/util/test_color.py", + "file_name": "test_color.py", + "fun_name": "test_white_levels_to_color_temperature", + "commit_message": "Use Kelvin as the preferred color temperature unit (#79591)\n\n* Use Kelvin as the preferred white temperature unit\r\n\r\n* Update homekit\r\n\r\n* Adjust tests", + "code": "def test_white_levels_to_color_temperature():\n \n # Only cold channel enabled -> coldest color temperature\n assert color_util._white_levels_to_color_temperature(255, 0, 2000, 6535) == (\n 6535,\n 255,\n )\n assert color_util._white_levels_to_color_temperature(128, 0, 2000, 6535) == (\n 6535,\n 128,\n )\n # Only warm channel enabled -> warmest color temperature\n assert color_util._white_levels_to_color_temperature(0, 255, 2000, 6535) == (\n 2000,\n 255,\n )\n assert color_util._white_levels_to_color_temperature(0, 128, 2000, 6535) == (\n 2000,\n 128,\n )\n assert color_util._white_levels_to_color_temperature(112, 143, 2000, 6535) == (\n 2876,\n 255,\n )\n assert color_util._white_levels_to_color_temperature(56, 72, 2000, 6535) == (\n 2872,\n 128,\n )\n # Both channels turned off -> warmest color temperature\n assert color_util._white_levels_to_color_temperature(0, 0, 2000, 6535) == (\n 2000,\n 0,\n )\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 251, + "n_words": 99, + "vocab_size": 36, + "complexity": 1, + "nloc": 29, + "token_counts": 145, + "n_ast_nodes": 197, + "n_identifiers": 3, + "d_id": 87759, + "documentation": { + "docstring": "Test warm, cold conversion to color temp.\n\n Temperature values must be in mireds\n Home Assistant uses rgbcw for rgbww\n ", + "n_words": 19, + "vocab_size": 19, + "n_whitespaces": 28, + "language": "en" + } + }, + { + "id": 220194, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/ast.py", + "file_name": "ast.py", + "fun_name": "_write_str_avoiding_backslashes", + "commit_message": "add python 3.10.4 for windows", + "code": "def _write_str_avoiding_backslashes(self, string, *, quote_types=_ALL_QUOTES):\n \n string, quote_types = self._str_literal_helper(string, quote_types=quote_types)\n quote_type = quote_types[0]\n self.write(f\"{quote_type}{string}{quote_type}\")\n", + "url": "https://github.com/XX-net/XX-Net.git", + 
"language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 42, + "n_words": 14, + "vocab_size": 12, + "complexity": 1, + "nloc": 4, + "token_counts": 41, + "n_ast_nodes": 77, + "n_identifiers": 8, + "d_id": 55932, + "documentation": { + "docstring": "Write string literal value with a best effort attempt to avoid backslashes.", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 11, + "language": "en" + } + }, + { + "id": 275052, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/mixed_precision/loss_scale_optimizer.py", + "file_name": "loss_scale_optimizer.py", + "fun_name": "strategy_supports_loss_scaling", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def strategy_supports_loss_scaling():\n \n if not tf.distribute.has_strategy():\n return True\n strategy = tf.distribute.get_strategy()\n # Strategies are supported if either there is only one replica or if variables\n # are replicated per device. Otherwise, the current model.fit() implementation\n # and most custom training loops incorrectly unscale the gradients. Currently,\n # gradients are unscaled once per compute replica, but they should be unscaled\n # once per variable replica. When there is one variable replica for each\n # compute replica, this works fine, but otherwise issues will occur.\n # TODO(reedwm): Support all strategies.\n return isinstance(\n strategy,\n (\n tf.distribute.MultiWorkerMirroredStrategy,\n tf.compat.v1.distribute.experimental.MultiWorkerMirroredStrategy,\n tf.distribute.OneDeviceStrategy,\n tf.compat.v1.distribute.OneDeviceStrategy,\n tf.distribute.MirroredStrategy,\n tf.compat.v1.distribute.MirroredStrategy,\n ),\n )\n\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 229, + "n_words": 99, + "vocab_size": 75, + "complexity": 2, + "nloc": 15, + "token_counts": 85, + "n_ast_nodes": 136, + "n_identifiers": 13, + "d_id": 81301, + "documentation": { + "docstring": "Returns True if the current Strategy supports loss scaling.", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 72433, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/admin/views/mixins.py", + "file_name": "mixins.py", + "fun_name": "stream_csv", + "commit_message": "Reformat with black", + "code": "def stream_csv(self, queryset):\n \n writer = csv.DictWriter(Echo(), fieldnames=self.list_export)\n yield writer.writerow(\n {field: self.get_heading(queryset, field) for field in self.list_export}\n )\n\n for item in queryset:\n yield self.write_csv_row(writer, self.to_row_dict(item))\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 81, + "n_words": 24, + "vocab_size": 21, + "complexity": 3, + "nloc": 7, + "token_counts": 67, + "n_ast_nodes": 105, + "n_identifiers": 15, + "d_id": 15895, + "documentation": { + "docstring": "Generate a csv file line by line from queryset, to be used in a StreamingHTTPResponse", + "n_words": 15, + "vocab_size": 13, + "n_whitespaces": 14, + "language": "en" + } + }, + { + "id": 154619, + "commit_id": "e5b1888cd932909e49194d58035da34b210b91c4", + "repo": "modin", + "path": "modin/experimental/core/storage_formats/hdk/query_compiler.py", + "file_name": 
"query_compiler.py", + "fun_name": "_agg", + "commit_message": "FEAT-#4946: Replace OmniSci with HDK (#4947)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", + "code": "def _agg(self, agg, axis=0, level=None, **kwargs):\n \n if level is not None or axis != 0:\n raise NotImplementedError(\n \"HDK's aggregation functions does not support 'level' and 'axis' parameters.\"\n )\n\n # TODO: Do filtering on numeric columns if `numeric_only=True`\n if not kwargs.get(\"skipna\", True) or kwargs.get(\"numeric_only\"):\n raise NotImplementedError(\n \"HDK's aggregation functions does not support 'skipna' and 'numeric_only' parameters.\"\n )\n # Processed above, so can be omitted\n kwargs.pop(\"skipna\", None)\n kwargs.pop(\"numeric_only\", None)\n\n new_frame = self._modin_frame.agg(agg)\n new_frame = new_frame._set_index(\n pandas.Index.__new__(\n pandas.Index, data=[MODIN_UNNAMED_SERIES_LABEL], dtype=\"O\"\n )\n )\n return self.__constructor__(new_frame, shape_hint=\"row\")\n", + "url": "https://github.com/modin-project/modin.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 270, + "n_words": 82, + "vocab_size": 60, + "complexity": 5, + "nloc": 18, + "token_counts": 121, + "n_ast_nodes": 202, + "n_identifiers": 20, + "d_id": 36114, + "documentation": { + "docstring": "\n Perform specified aggregation along rows/columns.\n\n Parameters\n ----------\n agg : str\n Name of the aggregation function to perform.\n axis : {0, 1}, default: 0\n Axis to perform aggregation along. 0 is to apply function against each column,\n all the columns will be reduced into a single scalar. 1 is to aggregate\n across rows.\n *Note:* HDK storage format supports aggregation for 0 axis only, aggregation\n along rows will be defaulted to pandas.\n level : None, default: None\n Serves the compatibility purpose, always have to be None.\n **kwargs : dict\n Additional parameters to pass to the aggregation function.\n\n Returns\n -------\n DFAlgQueryCompiler\n New single-column (``axis=1``) or single-row (``axis=0``) query compiler containing\n the result of aggregation.\n ", + "n_words": 111, + "vocab_size": 81, + "n_whitespaces": 299, + "language": "en" + } + }, + { + "id": 58169, + "commit_id": "3fd5aef3a1b94ac270e7325aa75d534e4f412d5c", + "repo": "prefect", + "path": "src/prefect/orion/services/loop_service.py", + "file_name": "loop_service.py", + "fun_name": "run_once", + "commit_message": "Clean up loop service methods", + "code": "async def run_once(self) -> None:\n \n raise NotImplementedError(\"LoopService subclasses must implement this method.\")\n\n", + "url": "https://github.com/PrefectHQ/prefect.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 26, + "n_words": 12, + "vocab_size": 12, + "complexity": 1, + "nloc": 11, + "token_counts": 13, + "n_ast_nodes": 27, + "n_identifiers": 3, + "d_id": 11732, + "documentation": { + "docstring": "\n Represents one loop of the service.\n\n Users should override this method.\n\n To actually run the service once, call `LoopService().start(loops=1)`\n instead of `LoopService().run_once()`, because this method will not invoke setup\n and teardown methods properly.\n ", + "n_words": 33, + "vocab_size": 30, + "n_whitespaces": 76, + "language": "en" + } + }, + { + "id": 246015, + "commit_id": "7a1cefc6e37aa583647f2804c9d9c9765712c59a", + "repo": "synapse", + "path": "tests/rest/admin/test_user.py", + "file_name": "test_user.py", + "fun_name": 
"test_no_auth", + "commit_message": "Add admin API to get users' account data (#11664)\n\nCo-authored-by: reivilibre ", + "code": "def test_no_auth(self) -> None:\n \n channel = self.make_request(\"GET\", self.url, {})\n\n self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.MISSING_TOKEN, channel.json_body[\"errcode\"])\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 42, + "n_words": 14, + "vocab_size": 14, + "complexity": 1, + "nloc": 5, + "token_counts": 56, + "n_ast_nodes": 90, + "n_identifiers": 13, + "d_id": 70945, + "documentation": { + "docstring": "Try to get information of a user without authentication.", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 200621, + "commit_id": "1823aa534e379b9123f70389f5818ac4d24015a0", + "repo": "sympy", + "path": "sympy/algebras/quaternion.py", + "file_name": "quaternion.py", + "fun_name": "_is_extrinsic", + "commit_message": "Changed _check_sequence to _is_extrinsic\r\n\r\nAs suggested by @smichr\n\nCo-authored-by: Christopher Smith ", + "code": "def _is_extrinsic(seq):\n \n if len(seq) != 3:\n raise ValueError(\"Expected 3 axes, got `{}`.\".format(seq))\n if type(seq) != str:\n raise ValueError('Expected seq to be a string.')\n\n intrinsic = seq.isupper()\n extrinsic = seq.islower()\n if not (intrinsic or extrinsic):\n raise ValueError(\"seq must either be fully uppercase (for extrinsic \"\n \"rotations), or fully lowercase, for intrinsic \"\n \"rotations).\")\n\n i, j, k = seq.lower()\n if (i == j) or (j == k):\n raise ValueError(\"Consecutive axes must be different\")\n\n bad = set(seq) - set('xyzXYZ')\n if bad:\n raise ValueError(\"Expected axes from `seq` to be from \"\n \"['x', 'y', 'z'] or ['X', 'Y', 'Z'], \"\n \"got {}\".format(''.join(bad)))\n\n return extrinsic\n\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 263, + "n_words": 99, + "vocab_size": 68, + "complexity": 8, + "nloc": 20, + "token_counts": 128, + "n_ast_nodes": 231, + "n_identifiers": 18, + "d_id": 49735, + "documentation": { + "docstring": "validate seq and return True if seq is lowercase and False if uppercase", + "n_words": 13, + "vocab_size": 10, + "n_whitespaces": 12, + "language": "en" + } + }, + { + "id": 31488, + "commit_id": "3eed5530ec74bb60ad9f8f612717d0f6ccf820f2", + "repo": "transformers", + "path": "src/transformers/tokenization_utils_base.py", + "file_name": "tokenization_utils_base.py", + "fun_name": "sep_token", + "commit_message": "Fix properties of unset special tokens in non verbose mode (#17797)\n\nCo-authored-by: SaulLu <55560583+SaulLu@users.noreply.github.com>", + "code": "def sep_token(self) -> str:\n \n if self._sep_token is None:\n if self.verbose:\n logger.error(\"Using sep_token, but it is not set yet.\")\n return None\n return str(self._sep_token)\n", + "url": "https://github.com/huggingface/transformers.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 80, + "n_words": 22, + "vocab_size": 19, + "complexity": 3, + "nloc": 10, + "token_counts": 35, + "n_ast_nodes": 61, + "n_identifiers": 7, + "d_id": 5763, + "documentation": { + "docstring": "\n `str`: Separation token, to separate context and query in an input sequence. 
Log an error if used while not\n having been set.\n ", + "n_words": 22, + "vocab_size": 21, + "n_whitespaces": 44, + "language": "en" + } + }, + { + "id": 206849, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/views/generic/dates.py", + "file_name": "dates.py", + "fun_name": "get_year", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def get_year(self):\n \n year = self.year\n if year is None:\n try:\n year = self.kwargs[\"year\"]\n except KeyError:\n try:\n year = self.request.GET[\"year\"]\n except KeyError:\n raise Http404(_(\"No year specified\"))\n return year\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 18, + "n_whitespaces": 160, + "n_words": 27, + "vocab_size": 17, + "complexity": 4, + "nloc": 11, + "token_counts": 54, + "n_ast_nodes": 96, + "n_identifiers": 9, + "d_id": 51751, + "documentation": { + "docstring": "Return the year for which this view should display data.", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 125703, + "commit_id": "de7bd015a4867317569cb0ad775015f6f35fdd1e", + "repo": "ray", + "path": "python/ray/tune/execution/trial_runner.py", + "file_name": "trial_runner.py", + "fun_name": "_validate_result_metrics", + "commit_message": "[air/tune/docs] Change Tuner() occurences in rest of ray/tune (#26961)", + "code": "def _validate_result_metrics(self, result):\n \n if int(os.environ.get(\"TUNE_DISABLE_STRICT_METRIC_CHECKING\", 0)) != 1 and (\n len({k for k in result if k not in list(DEBUG_METRICS) + [DONE]}) > 1\n ):\n base_metric = self._metric if self._metric != DEFAULT_METRIC else None\n scheduler_metric = (\n self._scheduler_alg.metric\n if self._scheduler_alg.metric != DEFAULT_METRIC\n else None\n )\n search_metrics = (\n self._search_alg.metric\n if self._search_alg.metric != DEFAULT_METRIC\n else None\n )\n\n if isinstance(search_metrics, str):\n search_metrics = [search_metrics]\n\n if base_metric and base_metric not in result:\n report_metric = base_metric\n location = \"tune.TuneConfig()\"\n elif scheduler_metric and scheduler_metric not in result:\n report_metric = scheduler_metric\n location = type(self._scheduler_alg).__name__\n elif search_metrics and any(\n search_metric not in result for search_metric in search_metrics\n ):\n report_metric = list(\n filter(\n lambda search_metric: search_metric not in result,\n search_metrics,\n )\n )\n if len(report_metric) == 1:\n report_metric = report_metric[0]\n location = type(self._search_alg).__name__\n else:\n report_metric = None\n location = None\n\n if report_metric:\n raise ValueError(\n \"Trial returned a result which did not include the \"\n \"specified metric(s) `{}` that `{}` expects. \"\n \"Make sure your calls to `tune.report()` include the \"\n \"metric, or set the \"\n \"TUNE_DISABLE_STRICT_METRIC_CHECKING \"\n \"environment variable to 1. Result: {}\".format(\n report_metric, location, result\n )\n )\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 902, + "n_words": 179, + "vocab_size": 88, + "complexity": 18, + "nloc": 49, + "token_counts": 238, + "n_ast_nodes": 379, + "n_identifiers": 31, + "d_id": 27954, + "documentation": { + "docstring": "\n Check if any of the required metrics was not reported\n in the last result. 
If the only items are ``done`` or any of\n DEBUG_METRICS, this means that no result was ever received and\n the trial just returned. This is also okay and will not raise\n an error.\n\n This will ignore checking for the DEFAULT_METRIC.\n ", + "n_words": 54, + "vocab_size": 43, + "n_whitespaces": 104, + "language": "en" + } + }, + { + "id": 204301, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/contrib/sessions/backends/db.py", + "file_name": "db.py", + "fun_name": "save", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def save(self, must_create=False):\n \n if self.session_key is None:\n return self.create()\n data = self._get_session(no_load=must_create)\n obj = self.create_model_instance(data)\n using = router.db_for_write(self.model, instance=obj)\n try:\n with transaction.atomic(using=using):\n obj.save(\n force_insert=must_create, force_update=not must_create, using=using\n )\n except IntegrityError:\n if must_create:\n raise CreateError\n raise\n except DatabaseError:\n if not must_create:\n raise UpdateError\n raise\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 244, + "n_words": 43, + "vocab_size": 34, + "complexity": 6, + "nloc": 19, + "token_counts": 103, + "n_ast_nodes": 168, + "n_identifiers": 23, + "d_id": 50688, + "documentation": { + "docstring": "\n Save the current session data to the database. If 'must_create' is\n True, raise a database error if the saving operation doesn't create a\n new entry (as opposed to possibly updating an existing entry).\n ", + "n_words": 33, + "vocab_size": 29, + "n_whitespaces": 62, + "language": "en" + } + }, + { + "id": 43921, + "commit_id": "d48a3a357fd89ec805d086d5b6c1f1d4daf77b9a", + "repo": "airflow", + "path": "tests/conftest.py", + "file_name": "conftest.py", + "fun_name": "dag_maker", + "commit_message": "Add TaskMap and TaskInstance.map_id (#20286)\n\nCo-authored-by: Ash Berlin-Taylor ", + "code": "def dag_maker(request):\n \n import lazy_object_proxy\n\n # IMPORTANT: Delay _all_ imports from `airflow.*` to _inside a method_.\n # This fixture is \"called\" early on in the pytest collection process, and\n # if we import airflow.* here the wrong (non-test) config will be loaded\n # and \"baked\" in to various constants\n\n want_serialized = False\n\n # Allow changing default serialized behaviour with `@pytest.mark.need_serialized_dag` or\n # `@pytest.mark.need_serialized_dag(False)`\n serialized_marker = request.node.get_closest_marker(\"need_serialized_dag\")\n if serialized_marker:\n (want_serialized,) = serialized_marker.args or (True,)\n\n from airflow.utils.log.logging_mixin import LoggingMixin\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 119, + "n_words": 76, + "vocab_size": 60, + "complexity": 4, + "nloc": 25, + "token_counts": 100, + "n_ast_nodes": 83, + "n_identifiers": 13, + "d_id": 8096, + "documentation": { + "docstring": "\n The dag_maker helps us to create DAG, DagModel, and SerializedDAG automatically.\n\n You have to use the dag_maker as a context manager and it takes\n the same argument as DAG::\n\n with dag_maker(dag_id=\"mydag\") as dag:\n task1 = DummyOperator(task_id='mytask')\n task2 = DummyOperator(task_id='mytask2')\n\n If the DagModel you want to use needs different parameters than the one\n automatically created 
by the dag_maker, you have to update the DagModel as below::\n\n dag_maker.dag_model.is_active = False\n session.merge(dag_maker.dag_model)\n session.commit()\n\n For any test you use the dag_maker, make sure to create a DagRun::\n\n dag_maker.create_dagrun()\n\n The dag_maker.create_dagrun takes the same arguments as dag.create_dagrun\n\n If you want to operate on serialized DAGs, then either pass ``serialized=True` to the ``dag_maker()``\n call, or you can mark your test/class/file with ``@pytest.mark.need_serialized_dag(True)``. In both of\n these cases the ``dag`` returned by the context manager will be a lazily-evaluated proxy object to the\n SerializedDAG.\n ", + "n_words": 137, + "vocab_size": 90, + "n_whitespaces": 231, + "language": "en" + } + }, + { + "id": 160188, + "commit_id": "f404e9e92e87a3990712d723d5c562a89300ac01", + "repo": "numpy", + "path": "numpy/lib/function_base.py", + "file_name": "function_base.py", + "fun_name": "rot90", + "commit_message": "Add space after argument name", + "code": "def rot90(m, k=1, axes=(0, 1)):\n \n axes = tuple(axes)\n if len(axes) != 2:\n raise ValueError(\"len(axes) must be 2.\")\n\n m = asanyarray(m)\n\n if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim:\n raise ValueError(\"Axes must be different.\")\n\n if (axes[0] >= m.ndim or axes[0] < -m.ndim\n or axes[1] >= m.ndim or axes[1] < -m.ndim):\n raise ValueError(\"Axes={} out of range for array of ndim={}.\"\n .format(axes, m.ndim))\n\n k %= 4\n\n if k == 0:\n return m[:]\n if k == 2:\n return flip(flip(m, axes[0]), axes[1])\n\n axes_list = arange(0, m.ndim)\n (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],\n axes_list[axes[0]])\n\n if k == 1:\n return transpose(flip(m, axes[1]), axes_list)\n else:\n # k == 3\n return flip(transpose(m, axes_list), axes[1])\n\n", + "url": "https://github.com/numpy/numpy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 265, + "n_words": 105, + "vocab_size": 68, + "complexity": 11, + "nloc": 79, + "token_counts": 250, + "n_ast_nodes": 377, + "n_identifiers": 15, + "d_id": 38560, + "documentation": { + "docstring": "\n Rotate an array by 90 degrees in the plane specified by axes.\n\n Rotation direction is from the first towards the second axis.\n\n Parameters\n ----------\n m : array_like\n Array of two or more dimensions.\n k : integer\n Number of times the array is rotated by 90 degrees.\n axes : (2,) array_like\n The array is rotated in the plane defined by the axes.\n Axes must be different.\n\n .. 
versionadded:: 1.12.0\n\n Returns\n -------\n y : ndarray\n A rotated view of `m`.\n\n See Also\n --------\n flip : Reverse the order of elements in an array along the given axis.\n fliplr : Flip an array horizontally.\n flipud : Flip an array vertically.\n\n Notes\n -----\n ``rot90(m, k=1, axes=(1,0))`` is the reverse of\n ``rot90(m, k=1, axes=(0,1))``\n\n ``rot90(m, k=1, axes=(1,0))`` is equivalent to\n ``rot90(m, k=-1, axes=(0,1))``\n\n Examples\n --------\n >>> m = np.array([[1,2],[3,4]], int)\n >>> m\n array([[1, 2],\n [3, 4]])\n >>> np.rot90(m)\n array([[2, 4],\n [1, 3]])\n >>> np.rot90(m, 2)\n array([[4, 3],\n [2, 1]])\n >>> m = np.arange(8).reshape((2,2,2))\n >>> np.rot90(m, 1, (1,2))\n array([[[1, 3],\n [0, 2]],\n [[5, 7],\n [4, 6]]])\n\n ", + "n_words": 170, + "vocab_size": 108, + "n_whitespaces": 378, + "language": "en" + } + }, + { + "id": 244350, + "commit_id": "9a3bf7660e6ced54672741095f96df07919f9ba7", + "repo": "mmdetection", + "path": "mmdet/models/dense_heads/dense_test_mixins.py", + "file_name": "dense_test_mixins.py", + "fun_name": "aug_test_rpn", + "commit_message": "[Refactor] Refactor dense head outputs to InstanceResults.", + "code": "def aug_test_rpn(self, feats, img_metas):\n \n samples_per_gpu = len(img_metas[0])\n aug_proposals = [[] for _ in range(samples_per_gpu)]\n for x, img_meta in zip(feats, img_metas):\n results_list = self.simple_test_rpn(x, img_meta)\n for i, results in enumerate(results_list):\n proposals = torch.cat(\n [results.bboxes, results.scores[:, None]], dim=-1)\n aug_proposals[i].append(proposals)\n # reorganize the order of 'img_metas' to match the dimensions\n # of 'aug_proposals'\n aug_img_metas = []\n for i in range(samples_per_gpu):\n aug_img_meta = []\n for j in range(len(img_metas)):\n aug_img_meta.append(img_metas[j][i])\n aug_img_metas.append(aug_img_meta)\n # after merging, proposals will be rescaled to the original image size\n\n merged_proposals = []\n for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas):\n merged_proposal = merge_aug_proposals(proposals, aug_img_meta,\n self.test_cfg)\n results = InstanceData()\n results.bboxes = merged_proposal[:, :4]\n results.scores = merged_proposal[:, 4]\n merged_proposals.append(results)\n return merged_proposals\n\n if sys.version_info >= (3, 7):\n", + "url": "https://github.com/open-mmlab/mmdetection.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 421, + "n_words": 111, + "vocab_size": 77, + "complexity": 7, + "nloc": 24, + "token_counts": 206, + "n_ast_nodes": 335, + "n_identifiers": 34, + "d_id": 70342, + "documentation": { + "docstring": "Test with augmentation for only for ``RPNHead`` and its variants,\n e.g., ``GARPNHead``, etc.\n\n Args:\n feats (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n img_metas (list[dict]): Meta info of each image.\n\n Returns:\n list[Tensor]: Proposals of each image, each item has shape (n, 5),\n where 5 represent (tl_x, tl_y, br_x, br_y, score).\n ", + "n_words": 52, + "vocab_size": 47, + "n_whitespaces": 151, + "language": "en" + } + }, + { + "id": 64314, + "commit_id": "a3e69cf75d27198132d05c7c10475a0297b1e190", + "repo": "erpnext", + "path": "erpnext/utilities/bulk_transaction.py", + "file_name": "bulk_transaction.py", + "fun_name": "show_job_status", + "commit_message": "feat: Bulk Transaction Processing (#28580)\n\n* feat: Bulk Transaction Processing\r\n\r\n* fix: add flags to ignore validations and exception handling correction\r\n\r\n* fix: remove 
duplicate code, added logger functionality and improved notifications\r\n\r\n* fix: linting and sider issues\r\n\r\n* test: added tests\r\n\r\n* fix: linter issues\r\n\r\n* fix: failing test case\r\n\r\n* fix: sider issues and test cases\r\n\r\n* refactor: mapping function calls to create order/invoice\r\n\r\n* fix: added more test cases to increase coverage\r\n\r\n* fix: test cases\r\n\r\n* fix: sider issue\r\n\r\n* fix: rename doctype, improve formatting and minor refactor\r\n\r\n* fix: update doctype name in hooks and sider issues\r\n\r\n* fix: entry log test case\r\n\r\n* fix: typos, translations and company name in tests\r\n\r\n* fix: linter issues and translations\r\n\r\n* fix: linter issue\r\n\r\n* fix: split into separate function for marking failed transaction\r\n\r\n* fix: typos, retry failed transaction logic and make log read only\r\n\r\n* fix: hide retry button when no failed transactions and remove test cases not rrelevant\r\n\r\n* fix: sider issues and indentation to tabs\r\n\r\nCo-authored-by: Ankush Menat ", + "code": "def show_job_status(failed_history, deserialized_data, to_doctype):\n\tif not failed_history:\n\t\tfrappe.msgprint(\n\t\t\t_(\"Creation of {0} successful\").format(to_doctype),\n\t\t\ttitle=\"Successful\",\n\t\t\tindicator=\"green\",\n\t\t)\n\n\tif len(failed_history) != 0 and len(failed_history) < len(deserialized_data):\n\t\tfrappe.msgprint(\n\t\t\t_().format(\n\t\t\t\tto_doctype\n\t\t\t),\n\t\t\ttitle=\"Partially successful\",\n\t\t\tindicator=\"orange\",\n\t\t)\n\n\tif len(failed_history) == len(deserialized_data):\n\t\tfrappe.msgprint(\n\t\t\t_().format(\n\t\t\t\tto_doctype\n\t\t\t),\n\t\t\ttitle=\"Failed\",\n\t\t\tindicator=\"red\",\n\t\t)\n\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 19, + "n_words": 42, + "vocab_size": 30, + "complexity": 5, + "nloc": 25, + "token_counts": 111, + "n_ast_nodes": 187, + "n_identifiers": 11, + "d_id": 13603, + "documentation": { + "docstring": "Creation of {0} partially successful.\n\t\t\t\tCheck Bulk Transaction LogCreation of {0} failed.\n\t\t\t\tCheck Bulk Transaction Log", + "n_words": 18, + "vocab_size": 12, + "n_whitespaces": 15, + "language": "en" + } + }, + { + "id": 212228, + "commit_id": "560a57e166a1f54319df57127502d48ee4ecc72e", + "repo": "bokeh", + "path": "bokeh/models/util/structure.py", + "file_name": "structure.py", + "fun_name": "_make_prop_dict", + "commit_message": "Generalize filtering on CDS views (#12054)\n\n* Generalize filtering on CDS views\r\n\r\n* Add type information to us_{counties,states}\r\n\r\n* Add plotting/file/filtering example\r\n\r\n* Add a migration note\r\n\r\n* Update models.util.structure and tests\r\n\r\n* Fix a pandas' deprecation warning\r\n\r\n* Update CDSView.{filters->filter}\r\n\r\n* Update documentation\r\n\r\n* Add more unit tests for BitSet (Indices)\r\n\r\n* Add unit tests\r\n\r\n* Add CDSView.filters back-compat to bokehjs", + "code": "def _make_prop_dict(self) -> pd.DataFrame:\n \n pd = import_required(\"pandas\", \"Structure graphs require Pandas (http://pandas.pydata.org) to be installed\")\n df = pd.DataFrame()\n for x in self._graph.nodes(data=True):\n M = self._model.select_one(dict(id=x[0]))\n Z = pd.DataFrame(self._obj_props_to_df2(M))\n Z[\"id\"] = x[0]\n Z[\"model\"] = str(M)\n Z[\"values\"] = Z[\"values\"].map(lambda x: str(x))\n Z[\"types\"] = Z[\"types\"].map(lambda x: str(x))\n df = pd.concat([df, Z])\n return df\n", + "url": 
"https://github.com/bokeh/bokeh.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 162, + "n_words": 50, + "vocab_size": 38, + "complexity": 2, + "nloc": 16, + "token_counts": 142, + "n_ast_nodes": 239, + "n_identifiers": 20, + "d_id": 53207, + "documentation": { + "docstring": " Returns a dataframe containing all the properties of all the submodels of the model being\n analyzed. Used as datasource to show attributes.\n\n ", + "n_words": 22, + "vocab_size": 18, + "n_whitespaces": 37, + "language": "en" + } + }, + { + "id": 242436, + "commit_id": "a0e1fde1eddf45f26653e2ff6080d31e177adbec", + "repo": "Pillow", + "path": "src/PIL/ImageFile.py", + "file_name": "ImageFile.py", + "fun_name": "encode_to_file", + "commit_message": "Added PyEncoder", + "code": "def encode_to_file(self, fh, bufsize):\n \n errcode = 0\n while errcode == 0:\n status, errcode, buf = self.encode(bufsize)\n if status > 0:\n fh.write(buf[status:])\n return errcode\n", + "url": "https://github.com/python-pillow/Pillow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 88, + "n_words": 23, + "vocab_size": 19, + "complexity": 3, + "nloc": 7, + "token_counts": 47, + "n_ast_nodes": 75, + "n_identifiers": 9, + "d_id": 69858, + "documentation": { + "docstring": "\n :param fh: File handle.\n :param bufsize: Buffer size.\n\n :returns: If finished successfully, return 0.\n Otherwise, return an error code. Err codes are from\n :data:`.ImageFile.ERRORS`.\n ", + "n_words": 24, + "vocab_size": 22, + "n_whitespaces": 75, + "language": "en" + } + }, + { + "id": 276953, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/utils/layer_utils.py", + "file_name": "layer_utils.py", + "fun_name": "count_params", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def count_params(weights):\n \n unique_weights = {id(w): w for w in weights}.values()\n # Ignore TrackableWeightHandlers, which will not have a shape defined.\n unique_weights = [w for w in unique_weights if hasattr(w, \"shape\")]\n weight_shapes = [w.shape.as_list() for w in unique_weights]\n standardized_weight_shapes = [\n [0 if w_i is None else w_i for w_i in w] for w in weight_shapes\n ]\n return int(sum(np.prod(p) for p in standardized_weight_shapes))\n\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 93, + "n_words": 62, + "vocab_size": 39, + "complexity": 9, + "nloc": 8, + "token_counts": 93, + "n_ast_nodes": 145, + "n_identifiers": 17, + "d_id": 81800, + "documentation": { + "docstring": "Count the total number of scalars composing the weights.\n\n Args:\n weights: An iterable containing the weights on which to compute params\n\n Returns:\n The total number of scalars composing the weights\n ", + "n_words": 30, + "vocab_size": 21, + "n_whitespaces": 53, + "language": "en" + } + }, + { + "id": 266015, + "commit_id": "e7f54c5867cf49126bbf95e28633e4283c2bbcb2", + "repo": "netbox", + "path": "netbox/extras/plugins/templates.py", + "file_name": "templates.py", + "fun_name": "render", + "commit_message": "Reorganize plugin resources", + "code": "def render(self, template_name, extra_context=None):\n \n if extra_context is None:\n extra_context = {}\n elif not isinstance(extra_context, dict):\n raise TypeError(\"extra_context must be a dictionary\")\n\n return 
get_template(template_name).render({**self.context, **extra_context})\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 74, + "n_words": 24, + "vocab_size": 23, + "complexity": 3, + "nloc": 6, + "token_counts": 53, + "n_ast_nodes": 87, + "n_identifiers": 9, + "d_id": 78268, + "documentation": { + "docstring": "\n Convenience method for rendering the specified Django template using the default context data. An additional\n context dictionary may be passed as `extra_context`.\n ", + "n_words": 22, + "vocab_size": 20, + "n_whitespaces": 44, + "language": "en" + } + }, + { + "id": 24488, + "commit_id": "ddaa2c2552e19635cd6cdf38619f1f176c358f89", + "repo": "PaddleOCR", + "path": "ppstructure/table/table_master_match.py", + "file_name": "table_master_match.py", + "fun_name": "deal_bb", + "commit_message": "add SLANet", + "code": "def deal_bb(result_token):\n \n # find out parts.\n thead_pattern = '(.*?)'\n if re.search(thead_pattern, result_token) is None:\n return result_token\n thead_part = re.search(thead_pattern, result_token).group()\n origin_thead_part = copy.deepcopy(thead_part)\n\n # check \"rowspan\" or \"colspan\" occur in parts or not .\n span_pattern = \"|||\"\n span_iter = re.finditer(span_pattern, thead_part)\n span_list = [s.group() for s in span_iter]\n has_span_in_head = True if len(span_list) > 0 else False\n\n if not has_span_in_head:\n # not include \"rowspan\" or \"colspan\" branch 1.\n # 1. replace to , and to \n # 2. it is possible to predict text include or by Text-line recognition,\n # so we replace to , and to \n thead_part = thead_part.replace('', '')\\\n .replace('', '')\\\n .replace('', '')\\\n .replace('', '')\n else:\n # include \"rowspan\" or \"colspan\" branch 2.\n # Firstly, we deal rowspan or colspan cases.\n # 1. replace > to >\n # 2. replace to \n # 3. 
it is possible to predict text include or by Text-line recognition,\n # so we replace to , and to \n\n # Secondly, deal ordinary cases like branch 1\n\n # replace \">\" to \"\"\n replaced_span_list = []\n for sp in span_list:\n replaced_span_list.append(sp.replace('>', '>'))\n for sp, rsp in zip(span_list, replaced_span_list):\n thead_part = thead_part.replace(sp, rsp)\n\n # replace \"\" to \"\"\n thead_part = thead_part.replace('', '')\n\n # remove duplicated by re.sub\n mb_pattern = \"()+\"\n single_b_string = \"\"\n thead_part = re.sub(mb_pattern, single_b_string, thead_part)\n\n mgb_pattern = \"()+\"\n single_gb_string = \"\"\n thead_part = re.sub(mgb_pattern, single_gb_string, thead_part)\n\n # ordinary cases like branch 1\n thead_part = thead_part.replace('', '').replace('',\n '')\n\n # convert back to , empty cell has no .\n # but space cell( ) is suitable for \n thead_part = thead_part.replace('', '')\n # deal with duplicated \n thead_part = deal_duplicate_bb(thead_part)\n # deal with isolate span tokens, which causes by wrong predict by structure prediction.\n # eg.PMC5994107_011_00.png\n thead_part = deal_isolate_span(thead_part)\n # replace original result with new thead part.\n result_token = result_token.replace(origin_thead_part, thead_part)\n return result_token\n\n", + "url": "https://github.com/PaddlePaddle/PaddleOCR.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 720, + "n_words": 324, + "vocab_size": 170, + "complexity": 7, + "nloc": 35, + "token_counts": 264, + "n_ast_nodes": 484, + "n_identifiers": 30, + "d_id": 4740, + "documentation": { + "docstring": "\n In our opinion, always occurs in text's context.\n This function will find out all tokens in and insert by manual.\n :param result_token:\n :return:\n ", + "n_words": 27, + "vocab_size": 24, + "n_whitespaces": 43, + "language": "en" + } + }, + { + "id": 298979, + "commit_id": "8745401af59da209e6304911f81e5416d4f18bd7", + "repo": "core", + "path": "homeassistant/components/zha/climate.py", + "file_name": "climate.py", + "fun_name": "_rm_rs_action", + "commit_message": "Use climate enums in zha (#70754)", + "code": "def _rm_rs_action(self) -> HVACAction | None:\n \n\n if (running_state := self._thrm.running_state) is None:\n return None\n if running_state & (\n T.RunningState.Heat_State_On | T.RunningState.Heat_2nd_Stage_On\n ):\n return HVACAction.HEATING\n if running_state & (\n T.RunningState.Cool_State_On | T.RunningState.Cool_2nd_Stage_On\n ):\n return HVACAction.COOLING\n if running_state & (\n T.RunningState.Fan_State_On\n | T.RunningState.Fan_2nd_Stage_On\n | T.RunningState.Fan_3rd_Stage_On\n ):\n return HVACAction.FAN\n if running_state & T.RunningState.Idle:\n return HVACAction.IDLE\n if self.hvac_mode != HVACMode.OFF:\n return HVACAction.IDLE\n return HVACAction.OFF\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 258, + "n_words": 60, + "vocab_size": 33, + "complexity": 7, + "nloc": 23, + "token_counts": 124, + "n_ast_nodes": 193, + "n_identifiers": 22, + "d_id": 97918, + "documentation": { + "docstring": "Return the current HVAC action based on running mode and running state.", + "n_words": 12, + "vocab_size": 11, + "n_whitespaces": 11, + "language": "en" + } + }, + { + "id": 321317, + "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", + "repo": "qutebrowser", + "path": "qutebrowser/misc/miscwidgets.py", + "file_name": 
"miscwidgets.py", + "fun_name": "mousePressEvent", + "commit_message": "Run scripts/dev/rewrite_enums.py", + "code": "def mousePressEvent(self, e):\n \n if e.button() == Qt.MouseButton.LeftButton:\n e.accept()\n self.toggle()\n else:\n super().mousePressEvent(e)\n\n", + "url": "https://github.com/qutebrowser/qutebrowser.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 65, + "n_words": 11, + "vocab_size": 11, + "complexity": 2, + "nloc": 6, + "token_counts": 41, + "n_ast_nodes": 72, + "n_identifiers": 10, + "d_id": 117657, + "documentation": { + "docstring": "Toggle the fold if the widget was pressed.\n\n Args:\n e: The QMouseEvent.\n ", + "n_words": 12, + "vocab_size": 11, + "n_whitespaces": 37, + "language": "en" + } + }, + { + "id": 174994, + "commit_id": "99eab68bf959e4c71c2688e4b1675ce9147ee785", + "repo": "pip", + "path": "src/pip/_vendor/pygments/formatters/img.py", + "file_name": "img.py", + "fun_name": "get_text_size", + "commit_message": "Upgrade pygments to 2.13.0", + "code": "def get_text_size(self, text):\n \n font = self.fonts['NORMAL']\n if hasattr(font, 'getbbox'): # Pillow >= 9.2.0\n return font.getbbox(text)[2:4]\n else:\n return font.getsize(text)\n", + "url": "https://github.com/pypa/pip.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 69, + "n_words": 18, + "vocab_size": 17, + "complexity": 2, + "nloc": 6, + "token_counts": 45, + "n_ast_nodes": 77, + "n_identifiers": 8, + "d_id": 41534, + "documentation": { + "docstring": "\n Get the text size (width, height).\n ", + "n_words": 6, + "vocab_size": 6, + "n_whitespaces": 21, + "language": "en" + } + }, + { + "id": 246925, + "commit_id": "02d708568b476f2f7716000b35c0adfa4cbd31b3", + "repo": "synapse", + "path": "tests/rest/client/test_upgrade_room.py", + "file_name": "test_upgrade_room.py", + "fun_name": "test_power_levels", + "commit_message": "Replace assertEquals and friends with non-deprecated versions. 
(#12092)", + "code": "def test_power_levels(self):\n \n # The other user doesn't have the proper power level.\n channel = self._upgrade_room(self.other_token)\n self.assertEqual(403, channel.code, channel.result)\n\n # Increase the power levels so that this user can upgrade.\n power_levels = self.helper.get_state(\n self.room_id,\n \"m.room.power_levels\",\n tok=self.creator_token,\n )\n power_levels[\"users\"][self.other] = 100\n self.helper.send_state(\n self.room_id,\n \"m.room.power_levels\",\n body=power_levels,\n tok=self.creator_token,\n )\n\n # The upgrade should succeed!\n channel = self._upgrade_room(self.other_token)\n self.assertEqual(200, channel.code, channel.result)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 224, + "n_words": 56, + "vocab_size": 39, + "complexity": 1, + "nloc": 17, + "token_counts": 109, + "n_ast_nodes": 172, + "n_identifiers": 17, + "d_id": 71409, + "documentation": { + "docstring": "\n Another user can upgrade the room if their power level is increased.\n ", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 176628, + "commit_id": "de1d00f20e0bc14f1cc911b3486e50225a8fa168", + "repo": "networkx", + "path": "networkx/generators/classic.py", + "file_name": "classic.py", + "fun_name": "wheel_graph", + "commit_message": "Adjust the usage of nodes_or_number decorator (#5599)\n\n* recorrect typo in decorators.py\r\n\r\n* Update tests to show troubles in current code\r\n\r\n* fix troubles with usage of nodes_or_number\r\n\r\n* fix typo\r\n\r\n* remove nodes_or_number where that makes sense\r\n\r\n* Reinclude nodes_or_numbers and add some tests for nonstandard usage\r\n\r\n* fix typowq\r\n\r\n* hopefully final tweaks (no behavior changes\r\n\r\n* Update test_classic.py\r\n\r\nCo-authored-by: Jarrod Millman ", + "code": "def wheel_graph(n, create_using=None):\n \n _, nodes = n\n G = empty_graph(nodes, create_using)\n if G.is_directed():\n raise NetworkXError(\"Directed Graph not supported\")\n\n if len(nodes) > 1:\n hub, *rim = nodes\n G.add_edges_from((hub, node) for node in rim)\n if len(rim) > 1:\n G.add_edges_from(pairwise(rim, cyclic=True))\n return G\n\n", + "url": "https://github.com/networkx/networkx.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 97, + "n_words": 40, + "vocab_size": 32, + "complexity": 5, + "nloc": 11, + "token_counts": 86, + "n_ast_nodes": 139, + "n_identifiers": 16, + "d_id": 42010, + "documentation": { + "docstring": "Return the wheel graph\n\n The wheel graph consists of a hub node connected to a cycle of (n-1) nodes.\n\n Parameters\n ----------\n n : int or iterable\n If an integer, node labels are 0 to n with center 0.\n If an iterable of nodes, the center is the first.\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. 
If graph instance, then cleared before populated.\n\n Node labels are the integers 0 to n - 1.\n ", + "n_words": 76, + "vocab_size": 51, + "n_whitespaces": 117, + "language": "en" + } + }, + { + "id": 337582, + "commit_id": "23c0341262bd396a3ba9265614b3818d6e08a6c1", + "repo": "accelerate", + "path": "tests/test_examples.py", + "file_name": "test_examples.py", + "fun_name": "test_checkpointing_by_steps", + "commit_message": "Refactor tests to use accelerate launch (#373)\n\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", + "code": "def test_checkpointing_by_steps(self):\n testargs = f.split()\n _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE, env=os.environ)\n self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_4\")))\n", + "url": "https://github.com/huggingface/accelerate.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 34, + "n_words": 14, + "vocab_size": 13, + "complexity": 1, + "nloc": 8, + "token_counts": 61, + "n_ast_nodes": 104, + "n_identifiers": 18, + "d_id": 121081, + "documentation": { + "docstring": "\n examples/by_feature/checkpointing.py\n --checkpointing_steps 2\n --output_dir {self.tmpdir}\n ", + "n_words": 5, + "vocab_size": 5, + "n_whitespaces": 34, + "language": "en" + } + }, + { + "id": 316275, + "commit_id": "7cd68381f1d4f58930ffd631dfbfc7159d459832", + "repo": "core", + "path": "tests/components/soma/test_config_flow.py", + "file_name": "test_config_flow.py", + "fun_name": "test_exception", + "commit_message": "Search/replace RESULT_TYPE_* by FlowResultType enum (#74642)", + "code": "async def test_exception(hass):\n \n flow = config_flow.SomaFlowHandler()\n flow.hass = hass\n with patch.object(SomaApi, \"list_devices\", side_effect=RequestException()):\n result = await flow.async_step_import({\"host\": MOCK_HOST, \"port\": MOCK_PORT})\n assert result[\"type\"] == data_entry_flow.FlowResultType.ABORT\n assert result[\"reason\"] == \"connection_error\"\n\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 53, + "n_words": 28, + "vocab_size": 24, + "complexity": 1, + "nloc": 7, + "token_counts": 69, + "n_ast_nodes": 124, + "n_identifiers": 17, + "d_id": 114853, + "documentation": { + "docstring": "Test if RequestException fires when no connection can be made.", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 266036, + "commit_id": "ea6d86e6c4bb6037465410db6205a7471bc81a6c", + "repo": "netbox", + "path": "netbox/extras/tests/test_customfields.py", + "file_name": "test_customfields.py", + "fun_name": "test_cf_data", + "commit_message": "Closes #10052: The cf attribute now returns deserialized custom field data", + "code": "def test_cf_data(self):\n \n site = Site(name='Test Site', slug='test-site')\n\n # Check custom field data on new instance\n site.custom_field_data['foo'] = 'abc'\n self.assertEqual(site.cf['foo'], 'abc')\n\n # Check custom field data from database\n site.save()\n site = Site.objects.get(name='Test Site')\n self.assertEqual(site.cf['foo'], 'abc')\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 97, + "n_words": 34, + "vocab_size": 24, + "complexity": 1, + "nloc": 7, + "token_counts": 69, + "n_ast_nodes": 129, + "n_identifiers": 12, + "d_id": 78273, + 
"documentation": { + "docstring": "\n Check that custom field data is present on the instance immediately after being set and after being fetched\n from the database.\n ", + "n_words": 21, + "vocab_size": 18, + "n_whitespaces": 43, + "language": "en" + } + }, + { + "id": 34277, + "commit_id": "841d979190319098adc8101f9820a02ee3be4c8b", + "repo": "transformers", + "path": "src/transformers/models/realm/tokenization_realm.py", + "file_name": "tokenization_realm.py", + "fun_name": "tokenize", + "commit_message": "Add FastTokenizer to REALM (#15211)\n\n* Remove BertTokenizer abstraction\r\n\r\n* Add FastTokenizer to REALM\r\n\r\n* Fix config archive map\r\n\r\n* Fix copies\r\n\r\n* Update realm.mdx\r\n\r\n* Apply suggestions from code review", + "code": "def tokenize(self, text, never_split=None):\n \n # union() returns a new set by concatenating the two sets.\n never_split = self.never_split.union(set(never_split)) if never_split else self.never_split\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n if self.tokenize_chinese_chars:\n text = self._tokenize_chinese_chars(text)\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if token not in never_split:\n if self.do_lower_case:\n token = token.lower()\n if self.strip_accents is not False:\n token = self._run_strip_accents(token)\n elif self.strip_accents:\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token, never_split))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens\n", + "url": "https://github.com/huggingface/transformers.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 396, + "n_words": 141, + "vocab_size": 89, + "complexity": 8, + "nloc": 18, + "token_counts": 140, + "n_ast_nodes": 232, + "n_identifiers": 21, + "d_id": 6238, + "documentation": { + "docstring": "\n Basic Tokenization of a piece of text. Split on \"white spaces\" only, for sub-word tokenization, see\n WordPieceTokenizer.\n\n Args:\n never_split (`List[str]`, *optional*)\n Kept for backward compatibility purposes. 
Now implemented directly at the base class level (see\n [`PreTrainedTokenizer.tokenize`]) List of token not to split.\n ", + "n_words": 42, + "vocab_size": 39, + "n_whitespaces": 112, + "language": "en" + } + }, + { + "id": 289733, + "commit_id": "2f1138562720cd50343d2fedd4981913a9ef6bd9", + "repo": "core", + "path": "homeassistant/components/mqtt/mixins.py", + "file_name": "mixins.py", + "fun_name": "async_mqtt_connect", + "commit_message": "Add typing hints for MQTT mixins (#80702)\n\n* Add typing hints for MQTT mixins\r\n\r\n* Follow up comments\r\n\r\n* config_entry is always set\r\n\r\n* typing discovery_data - substate None assignment\r\n\r\n* Rename `config[CONF_DEVICE]` -> specifications", + "code": "def async_mqtt_connect(self) -> None:\n \n if not self.hass.is_stopping:\n self.async_write_ha_state()\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 33, + "n_words": 8, + "vocab_size": 8, + "complexity": 2, + "nloc": 4, + "token_counts": 21, + "n_ast_nodes": 38, + "n_identifiers": 5, + "d_id": 88869, + "documentation": { + "docstring": "Update state on connection/disconnection to MQTT broker.", + "n_words": 7, + "vocab_size": 7, + "n_whitespaces": 6, + "language": "en" + } + }, + { + "id": 78814, + "commit_id": "8a7e0884d789449ddbbd08ddae48374d92a14d11", + "repo": "wagtail", + "path": "wagtail/tests/test_rich_text.py", + "file_name": "test_rich_text.py", + "fun_name": "test_count_characters", + "commit_message": "Finish implementing rich text max length with identical client & server count", + "code": "def test_count_characters(self):\n \n validator = RichTextMaxLengthValidator(50)\n self.assertEqual(validator.clean(\"
<p>Plain text</p>\"), 10)\n # HTML entities should be un-escaped.\n self.assertEqual(validator.clean(\"<p>There&#x27;s quote</p>\"), 13)\n # BR should be ignored.\n self.assertEqual(validator.clean(\"<p>Line<br/>break</p>\"), 9)\n # Content over multiple blocks should be treated as a single line of text with no joiner.\n self.assertEqual(validator.clean(\"<p>Multi</p><p>blocks</p>\"), 11)\n # Empty blocks should be ignored.\n self.assertEqual(validator.clean(\"<p>Empty</p><p></p><p>blocks</p>\"), 11)\n # HR should be ignored.\n self.assertEqual(validator.clean(\"<p>With</p><hr/><p>HR</p>\"), 6)\n # Embed blocks should be ignored.\n self.assertEqual(validator.clean(\"<p>With</p><embed/><p>embed</p>\"), 9)\n # Counts symbols with multiple code units (heart unicode + variation selector).\n self.assertEqual(validator.clean(\"<p>U+2764 U+FE0F ❤️</p>\"), 16)\n # Counts symbols with zero-width joiners.\n self.assertEqual(validator.clean(\"<p>👨‍👨‍👧</p>
\"), 5)\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 223, + "n_words": 90, + "vocab_size": 61, + "complexity": 1, + "nloc": 11, + "token_counts": 129, + "n_ast_nodes": 229, + "n_identifiers": 6, + "d_id": 16821, + "documentation": { + "docstring": "Keep those tests up-to-date with MaxLength tests client-side.", + "n_words": 8, + "vocab_size": 7, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 94500, + "commit_id": "7bbb85a0d95d23620228a02bb4401fc09658f5f1", + "repo": "sentry", + "path": "tests/sentry/sentry_metrics/test_all_indexers.py", + "file_name": "test_all_indexers.py", + "fun_name": "test_already_created_plus_written_results", + "commit_message": "ref(metrics): Split caching out of indexers, random test refactoring [sns-1606] (#37714)", + "code": "def test_already_created_plus_written_results(indexer, indexer_cache) -> None:\n \n org_id = 1234\n\n raw_indexer = indexer\n indexer = CachingIndexer(indexer_cache, indexer)\n\n v0 = raw_indexer.record(use_case_id, org_id, \"v1.2.0\")\n v1 = raw_indexer.record(use_case_id, org_id, \"v1.2.1\")\n v2 = raw_indexer.record(use_case_id, org_id, \"v1.2.2\")\n\n expected_mapping = {\"v1.2.0\": v0, \"v1.2.1\": v1, \"v1.2.2\": v2}\n\n results = indexer.bulk_record(\n use_case_id=use_case_id, org_strings={org_id: {\"v1.2.0\", \"v1.2.1\", \"v1.2.2\"}}\n )\n assert len(results[org_id]) == len(expected_mapping) == 3\n\n for string, id in results[org_id].items():\n assert expected_mapping[string] == id\n\n results = indexer.bulk_record(\n use_case_id=use_case_id,\n org_strings={org_id: {\"v1.2.0\", \"v1.2.1\", \"v1.2.2\", \"v1.2.3\"}},\n )\n v3 = raw_indexer.resolve(use_case_id, org_id, \"v1.2.3\")\n expected_mapping[\"v1.2.3\"] = v3\n\n assert len(results[org_id]) == len(expected_mapping) == 4\n\n for string, id in results[org_id].items():\n assert expected_mapping[string] == id\n\n fetch_meta = results.get_fetch_metadata()\n assert_fetch_type_for_tag_string_set(\n fetch_meta[org_id], FetchType.CACHE_HIT, {\"v1.2.0\", \"v1.2.1\", \"v1.2.2\"}\n )\n assert_fetch_type_for_tag_string_set(fetch_meta[org_id], FetchType.FIRST_SEEN, {\"v1.2.3\"})\n\n", + "url": "https://github.com/getsentry/sentry.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 216, + "n_words": 108, + "vocab_size": 62, + "complexity": 3, + "nloc": 32, + "token_counts": 257, + "n_ast_nodes": 411, + "n_identifiers": 27, + "d_id": 19097, + "documentation": { + "docstring": "\n Test that we correctly combine db read results with db write results\n for the same organization.\n ", + "n_words": 16, + "vocab_size": 14, + "n_whitespaces": 26, + "language": "en" + } + }, + { + "id": 61474, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py", + "file_name": "file_cache.py", + "fun_name": "url_to_file_path", + "commit_message": "upd; format", + "code": "def url_to_file_path(url, filecache):\n \n key = CacheController.cache_url(url)\n return filecache._fn(key)\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 17, + "n_words": 8, + "vocab_size": 8, + "complexity": 1, + "nloc": 3, + "token_counts": 23, + "n_ast_nodes": 39, + "n_identifiers": 7, + "d_id": 12586, + 
"documentation": { + "docstring": "Return the file cache path based on the URL.\n\n This does not ensure the file exists!\n ", + "n_words": 16, + "vocab_size": 13, + "n_whitespaces": 22, + "language": "en" + } + }, + { + "id": 40175, + "commit_id": "c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c", + "repo": "dash", + "path": "dash/_validate.py", + "file_name": "_validate.py", + "fun_name": "validate_multi_return", + "commit_message": "f-strings everywhere! fffff", + "code": "def validate_multi_return(outputs_list, output_value, callback_id):\n if not isinstance(output_value, (list, tuple)):\n raise exceptions.InvalidCallbackReturnValue(\n dedent(\n f\n )\n )\n\n if len(output_value) != len(outputs_list):\n raise exceptions.InvalidCallbackReturnValue(\n f\n )\n\n for i, outi in enumerate(outputs_list):\n if isinstance(outi, list):\n vi = output_value[i]\n if not isinstance(vi, (list, tuple)):\n raise exceptions.InvalidCallbackReturnValue(\n dedent(\n f\n )\n )\n\n if len(vi) != len(outi):\n raise exceptions.InvalidCallbackReturnValue(\n dedent(\n f\n )\n )\n\n", + "url": "https://github.com/plotly/dash.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 19, + "n_whitespaces": 361, + "n_words": 55, + "vocab_size": 30, + "complexity": 7, + "nloc": 43, + "token_counts": 122, + "n_ast_nodes": 275, + "n_identifiers": 15, + "d_id": 7339, + "documentation": { + "docstring": "\n The callback {callback_id} is a multi-output.\n Expected the output type to be a list or tuple but got:\n {output_value!r}.\n \n Invalid number of output values for {callback_id}.\n Expected {len(outputs_list)}, got {len(output_value)}\n \n The callback {callback_id} output {i} is a wildcard multi-output.\n Expected the output type to be a list or tuple but got:\n {vi!r}.\n output spec: {outi!r}\n \n Invalid number of output values for {callback_id} item {i}.\n Expected {len(vi)}, got {len(outi)}\n output spec: {outi!r}\n output value: {vi!r}\n ", + "n_words": 74, + "vocab_size": 38, + "n_whitespaces": 401, + "language": "en" + } + }, + { + "id": 262862, + "commit_id": "1a7d704ffbabb433007e3ba04750c2f13ade48e5", + "repo": "pyinstaller", + "path": "tests/unit/test_isolation.py", + "file_name": "test_isolation.py", + "fun_name": "test_pipe_leakage", + "commit_message": "Fix typos (#6782) [skip ci]", + "code": "def test_pipe_leakage():\n \n\n from psutil import Process\n parent = Process()\n\n # Get this platform's *count open handles* method.\n open_fds = parent.num_handles if os.name == \"nt\" else parent.num_fds\n old = open_fds()\n\n # Creating an isolated.Python() does nothing.\n child = isolated.Python()\n assert open_fds() == old\n\n # Entering its context creates the child process and 4 handles for sending/receiving to/from it. Then on Windows,\n # opening the parent's ends of the two pipes creates another two handles and starting the subprocess adds another\n # two (although I don't know what for).\n EXPECTED_INCREASE_IN_FDS = (4 if os.name != \"nt\" else 8)\n\n with child:\n assert open_fds() == old + EXPECTED_INCREASE_IN_FDS\n # Exiting must close them all immediately. 
No implicit closure by garbage collect.\n assert open_fds() == old\n\n # Do it again just to be sure that the context manager properly restarts.\n with child:\n assert open_fds() == old + EXPECTED_INCREASE_IN_FDS\n assert open_fds() == old\n\n", + "url": "https://github.com/pyinstaller/pyinstaller.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 218, + "n_words": 147, + "vocab_size": 96, + "complexity": 3, + "nloc": 14, + "token_counts": 94, + "n_ast_nodes": 171, + "n_identifiers": 14, + "d_id": 77412, + "documentation": { + "docstring": "\n There is a finite number of open pipes/file handles/file descriptors allowed per process. Ensure that all\n opened handles eventually get closed to prevent such *leakages* causing crashes in very long processes (such as\n the rest of our test suite).\n ", + "n_words": 39, + "vocab_size": 38, + "n_whitespaces": 52, + "language": "en" + } + }, + { + "id": 56293, + "commit_id": "c33f87fc7e0b6fb4714a88b492e7545f4dbd821f", + "repo": "prefect", + "path": "tests/utilities/test_collections.py", + "file_name": "test_collections.py", + "fun_name": "test_visit_collection_with_private_pydantic", + "commit_message": "get private attrs working", + "code": "async def test_visit_collection_with_private_pydantic(self):\n \n input = PrivatePydantic(x=1)\n input._y = 2\n input._z = 4\n\n result = await visit_collection(\n input, visit_fn=visit_even_numbers, return_data=False\n )\n assert result is None\n assert EVEN == {2, 4}\n\n result = await visit_collection(\n input, visit_fn=negative_even_numbers, return_data=True\n )\n assert result == input\n assert result.__private_attributes__ == input.__private_attributes__\n breakpoint()\n assert result._y == -2\n assert result._z == -4\n\n", + "url": "https://github.com/PrefectHQ/prefect.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 181, + "n_words": 54, + "vocab_size": 33, + "complexity": 1, + "nloc": 17, + "token_counts": 95, + "n_ast_nodes": 150, + "n_identifiers": 16, + "d_id": 11499, + "documentation": { + "docstring": "Check that we successfully capture private pydantic fields", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 60651, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_internal/commands/show.py", + "file_name": "show.py", + "fun_name": "search_packages_info", + "commit_message": "upd; format", + "code": "def search_packages_info(query):\n # type: (List[str]) -> Iterator[Dict[str, str]]\n \n installed = {}\n for p in pkg_resources.working_set:\n installed[canonicalize_name(p.project_name)] = p\n\n query_names = [canonicalize_name(name) for name in query]\n missing = sorted(\n [name for name, pkg in zip(query, query_names) if pkg not in installed]\n )\n if missing:\n logger.warning('Package(s) not found: %s', ', '.join(missing))\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 94, + "n_words": 49, + "vocab_size": 37, + "complexity": 28, + "nloc": 58, + "token_counts": 483, + "n_ast_nodes": 130, + "n_identifiers": 17, + "d_id": 12227, + "documentation": { + "docstring": "\n Gather details from installed distributions. Print distribution name,\n version, location, and installed files. 
Installed files requires a\n pip generated 'installed-files.txt' in the distributions '.egg-info'\n directory.\n ", + "n_words": 25, + "vocab_size": 24, + "n_whitespaces": 41, + "language": "en" + } + }, + { + "id": 66552, + "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", + "repo": "erpnext", + "path": "erpnext/patches/v11_0/refactor_naming_series.py", + "file_name": "refactor_naming_series.py", + "fun_name": "get_series_to_preserve", + "commit_message": "style: format code with black", + "code": "def get_series_to_preserve(doctype):\n\tseries_to_preserve = frappe.db.sql_list(\n\t\t.format(\n\t\t\tdoctype=doctype\n\t\t)\n\t)\n\tseries_to_preserve.sort()\n\treturn series_to_preserve\n\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 4, + "n_words": 12, + "vocab_size": 10, + "complexity": 1, + "nloc": 8, + "token_counts": 29, + "n_ast_nodes": 50, + "n_identifiers": 8, + "d_id": 14213, + "documentation": { + "docstring": "select distinct naming_series from `tab{doctype}` where ifnull(naming_series, '') != ''", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 123306, + "commit_id": "63977ebdffb9e54978da337a7ec3ce8200723162", + "repo": "sqlmap", + "path": "lib/core/common.py", + "file_name": "common.py", + "fun_name": "extractRegexResult", + "commit_message": "Minor update", + "code": "def extractRegexResult(regex, content, flags=0):\n \n\n retVal = None\n\n if regex and content and \"?P\" in regex:\n if isinstance(content, six.binary_type) and isinstance(regex, six.text_type):\n regex = getBytes(regex)\n\n match = re.search(regex, content, flags)\n\n if match:\n retVal = match.group(\"result\")\n\n return retVal\n", + "url": "https://github.com/sqlmapproject/sqlmap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 91, + "n_words": 36, + "vocab_size": 25, + "complexity": 7, + "nloc": 9, + "token_counts": 74, + "n_ast_nodes": 119, + "n_identifiers": 14, + "d_id": 27333, + "documentation": { + "docstring": "\n Returns 'result' group value from a possible match with regex on a given\n content\n\n >>> extractRegexResult(r'a(?P[^g]+)g', 'abcdefg')\n 'bcdef'\n >>> extractRegexResult(r'a(?P[^g]+)g', 'ABCDEFG', re.I)\n 'BCDEF'\n ", + "n_words": 23, + "vocab_size": 20, + "n_whitespaces": 45, + "language": "en" + } + }, + { + "id": 33630, + "commit_id": "59407bbeb31fff8340938768051c9daabd38d7a7", + "repo": "transformers", + "path": "src/transformers/models/deformable_detr/modeling_deformable_detr.py", + "file_name": "modeling_deformable_detr.py", + "fun_name": "loss_cardinality", + "commit_message": "Add Deformable DETR (#17281)\n\n* First draft\r\n\r\n* More improvements\r\n\r\n* Improve model, add custom CUDA code\r\n\r\n* Import torch before\r\n\r\n* Add script that imports custom layer\r\n\r\n* Add everything in new ops directory\r\n\r\n* Import custom layer in modeling file\r\n\r\n* Fix ARCHIVE_MAP typo\r\n\r\n* Creating the custom kernel on the fly.\r\n\r\n* Import custom layer in modeling file\r\n\r\n* More improvements\r\n\r\n* Fix CUDA loading\r\n\r\n* More improvements\r\n\r\n* Improve conversion script\r\n\r\n* Improve conversion script\r\n\r\n* Make it work until encoder_outputs\r\n\r\n* Make forward pass work\r\n\r\n* More improvements\r\n\r\n* Make logits match original implementation\r\n\r\n* Make implementation also support single_scale model\r\n\r\n* Add support 
for single_scale and dilation checkpoint\r\n\r\n* Add support for with_box_refine model\r\n\r\n* Support also two stage model\r\n\r\n* Improve tests\r\n\r\n* Fix more tests\r\n\r\n* Make more tests pass\r\n\r\n* Upload all models to the hub\r\n\r\n* Clean up some code\r\n\r\n* Improve decoder outputs\r\n\r\n* Rename intermediate hidden states and reference points\r\n\r\n* Improve model outputs\r\n\r\n* Move tests to dedicated folder\r\n\r\n* Improve model outputs\r\n\r\n* Fix retain_grad test\r\n\r\n* Improve docs\r\n\r\n* Clean up and make test_initialization pass\r\n\r\n* Improve variable names\r\n\r\n* Add copied from statements\r\n\r\n* Improve docs\r\n\r\n* Fix style\r\n\r\n* Improve docs\r\n\r\n* Improve docs, move tests to model folder\r\n\r\n* Fix rebase\r\n\r\n* Remove DetrForSegmentation from auto mapping\r\n\r\n* Apply suggestions from code review\r\n\r\n* Improve variable names and docstrings\r\n\r\n* Apply some more suggestions from code review\r\n\r\n* Apply suggestion from code review\r\n\r\n* better docs and variables names\r\n\r\n* hint to num_queries and two_stage confusion\r\n\r\n* remove asserts and code refactor\r\n\r\n* add exception if two_stage is True and with_box_refine is False\r\n\r\n* use f-strings\r\n\r\n* Improve docs and variable names\r\n\r\n* Fix code quality\r\n\r\n* Fix rebase\r\n\r\n* Add require_torch_gpu decorator\r\n\r\n* Add pip install ninja to CI jobs\r\n\r\n* Apply suggestion of @sgugger\r\n\r\n* Remove DeformableDetrForObjectDetection from auto mapping\r\n\r\n* Remove DeformableDetrModel from auto mapping\r\n\r\n* Add model to toctree\r\n\r\n* Add model back to mappings, skip model in pipeline tests\r\n\r\n* Apply @sgugger's suggestion\r\n\r\n* Fix imports in the init\r\n\r\n* Fix copies\r\n\r\n* Add CPU implementation\r\n\r\n* Comment out GPU function\r\n\r\n* Undo previous change\r\n\r\n* Apply more suggestions\r\n\r\n* Remove require_torch_gpu annotator\r\n\r\n* Fix quality\r\n\r\n* Add logger.info\r\n\r\n* Fix logger\r\n\r\n* Fix variable names\r\n\r\n* Fix initializaztion\r\n\r\n* Add missing initialization\r\n\r\n* Update checkpoint name\r\n\r\n* Add model to doc tests\r\n\r\n* Add CPU/GPU equivalence test\r\n\r\n* Add Deformable DETR to pipeline tests\r\n\r\n* Skip model for object detection pipeline\r\n\r\nCo-authored-by: Nicolas Patry \r\nCo-authored-by: Nouamane Tazi \r\nCo-authored-by: Sylvain Gugger ", + "code": "def loss_cardinality(self, outputs, targets, indices, num_boxes):\n \n logits = outputs[\"logits\"]\n device = logits.device\n target_lengths = torch.as_tensor([len(v[\"class_labels\"]) for v in targets], device=device)\n # Count the number of predictions that are NOT \"no-object\" (which is the last class)\n card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)\n card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())\n losses = {\"cardinality_error\": card_err}\n return losses\n", + "url": "https://github.com/huggingface/transformers.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 115, + "n_words": 52, + "vocab_size": 45, + "complexity": 2, + "nloc": 8, + "token_counts": 104, + "n_ast_nodes": 167, + "n_identifiers": 23, + "d_id": 6125, + "documentation": { + "docstring": "\n Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes.\n\n This is not really a loss, it is intended for logging purposes only. 
It doesn't propagate gradients.\n ", + "n_words": 32, + "vocab_size": 29, + "n_whitespaces": 54, + "language": "en" + } + }, + { + "id": 248428, + "commit_id": "1e453053cb12ff084fdcdc2f75c08ced274dff21", + "repo": "synapse", + "path": "tests/handlers/test_message.py", + "file_name": "test_message.py", + "fun_name": "test_duplicated_txn_id_one_call", + "commit_message": "Rename storage classes (#12913)", + "code": "def test_duplicated_txn_id_one_call(self):\n \n\n txn_id = \"something_else_suitably_random\"\n\n # Create two duplicate events to persist at the same time\n event1, context1 = self._create_duplicate_event(txn_id)\n event2, context2 = self._create_duplicate_event(txn_id)\n\n # Ensure their event IDs are different to start with\n self.assertNotEqual(event1.event_id, event2.event_id)\n\n events, _ = self.get_success(\n self._persist_event_storage_controller.persist_events(\n [(event1, context1), (event2, context2)]\n )\n )\n\n # Check that we've deduplicated the events.\n self.assertEqual(len(events), 2)\n self.assertEqual(events[0].event_id, events[1].event_id)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 179, + "n_words": 58, + "vocab_size": 49, + "complexity": 1, + "nloc": 12, + "token_counts": 99, + "n_ast_nodes": 158, + "n_identifiers": 17, + "d_id": 72277, + "documentation": { + "docstring": "Test that we correctly handle duplicates that we try and persist at\n the same time.\n ", + "n_words": 15, + "vocab_size": 13, + "n_whitespaces": 29, + "language": "en" + } + }, + { + "id": 176433, + "commit_id": "f11068c0115ede0c7b631f771c10be7efd0b950b", + "repo": "networkx", + "path": "networkx/algorithms/tests/test_polynomials.py", + "file_name": "test_polynomials.py", + "fun_name": "test_tutte_polynomial_disjoint_K1", + "commit_message": "Add Tutte polynomial (#5265)\n\nAdd a new polynomial module to algorithms for characteristic polynomials.\r\nAdds the Tutte polynomial, which is computed and ultimate represented as a\r\nsympy expression.\r\n\r\nCo-authored-by: Dan Schult \r\nCo-authored-by: Ross Barnowski ", + "code": "def test_tutte_polynomial_disjoint_K1():\n \n g = nx.complete_graph(1)\n t_g = nx.tutte_polynomial(g)\n h = nx.disjoint_union(g, g)\n t_h = nx.tutte_polynomial(h)\n assert sympy.simplify(t_g * t_g).equals(t_h)\n\n", + "url": "https://github.com/networkx/networkx.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 37, + "n_words": 19, + "vocab_size": 16, + "complexity": 1, + "nloc": 6, + "token_counts": 53, + "n_ast_nodes": 88, + "n_identifiers": 12, + "d_id": 41896, + "documentation": { + "docstring": "Tutte polynomial factors into the Tutte polynomials of its components.\n Verify this property with the disjoint union of two copies of `K_1`.\n ", + "n_words": 22, + "vocab_size": 18, + "n_whitespaces": 28, + "language": "en" + } + }, + { + "id": 9559, + "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", + "repo": "insightface", + "path": "reconstruction/ostec/utils/ganfit_camera.py", + "file_name": "ganfit_camera.py", + "fun_name": "perspective", + "commit_message": "initialize ostec", + "code": "def perspective(aspect_ratio, fov_y, near_clip, far_clip):\n \n focal_lengths_y = 1.0 / np.tan(fov_y * (math.pi / 360.0))\n depth_range = far_clip - near_clip\n p_22 = -(far_clip + near_clip) / depth_range\n p_23 = -2.0 * (far_clip * near_clip / depth_range)\n\n zeros = np.zeros_like(p_23, 
dtype=np.float32)\n # pyformat: disable\n perspective_transform = np.concatenate(\n [\n focal_lengths_y / aspect_ratio, zeros, zeros, zeros,\n zeros, focal_lengths_y, zeros, zeros,\n zeros, zeros, p_22, p_23,\n zeros, zeros, -np.ones_like(p_23, dtype=np.float32), zeros\n ], axis=0)\n # pyformat: enable\n perspective_transform = np.reshape(perspective_transform, [4, 4, -1])\n return np.transpose(perspective_transform, [2, 0, 1])\n", + "url": "https://github.com/deepinsight/insightface.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 160, + "n_words": 81, + "vocab_size": 53, + "complexity": 1, + "nloc": 15, + "token_counts": 167, + "n_ast_nodes": 229, + "n_identifiers": 23, + "d_id": 1634, + "documentation": { + "docstring": "Computes perspective transformation matrices.\n Functionality mimes gluPerspective (third_party/GL/glu/include/GLU/glu.h).\n Args:\n aspect_ratio: float value specifying the image aspect ratio (width/height).\n fov_y: 1-D float32 Tensor with shape [batch_size] specifying output vertical\n field of views in degrees.\n near_clip: 1-D float32 Tensor with shape [batch_size] specifying near\n clipping plane distance.\n far_clip: 1-D float32 Tensor with shape [batch_size] specifying far clipping\n plane distance.\n Returns:\n A [batch_size, 4, 4] float tensor that maps from right-handed points in eye\n space to left-handed points in clip space.\n ", + "n_words": 78, + "vocab_size": 56, + "n_whitespaces": 147, + "language": "en" + } + }, + { + "id": 101138, + "commit_id": "26e26c628803e592ce876e101a45033c87a5a97b", + "repo": "faceswap", + "path": "lib/cli/launcher.py", + "file_name": "launcher.py", + "fun_name": "_test_tkinter", + "commit_message": "Update TF to 2.9\n - Update TF to 2.7 to 2.9\n - Bump dependencies\n - Remove decode from pynvml calls\n - force keras predict functions to non-verbose\n - update tests\n - update Tensorboard logging\n - Update docs", + "code": "def _test_tkinter(cls) -> None:\n \n try:\n import tkinter # noqa pylint: disable=unused-import,import-outside-toplevel\n except ImportError as err:\n logger.error(\"It looks like TkInter isn't installed for your OS, so the GUI has been \"\n \"disabled. To enable the GUI please install the TkInter application. You \"\n \"can try:\")\n logger.info(\"Anaconda: conda install tk\")\n logger.info(\"Windows/macOS: Install ActiveTcl Community Edition from \"\n \"http://www.activestate.com\")\n logger.info(\"Ubuntu/Mint/Debian: sudo apt install python3-tk\")\n logger.info(\"Arch: sudo pacman -S tk\")\n logger.info(\"CentOS/Redhat: sudo yum install tkinter\")\n logger.info(\"Fedora: sudo dnf install python3-tkinter\")\n raise FaceswapError(\"TkInter not found\") from err\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 274, + "n_words": 82, + "vocab_size": 67, + "complexity": 2, + "nloc": 26, + "token_counts": 69, + "n_ast_nodes": 141, + "n_identifiers": 9, + "d_id": 20568, + "documentation": { + "docstring": " If the user is running the GUI, test whether the tkinter app is available on their\n machine. 
If not exit gracefully.\n\n This avoids having to import every tkinter function within the GUI in a wrapper and\n potentially spamming traceback errors to console.\n\n Raises\n ------\n FaceswapError\n If tkinter cannot be imported\n ", + "n_words": 50, + "vocab_size": 41, + "n_whitespaces": 111, + "language": "en" + } + }, + { + "id": 163673, + "commit_id": "acd7218f67fbe31308db7482e11fb9c8f30b51a8", + "repo": "pandas", + "path": "pandas/core/base.py", + "file_name": "base.py", + "fun_name": "is_monotonic_increasing", + "commit_message": "DEPR: Index.is_monotonic for Index.is_monotonic_increasing (#45422)", + "code": "def is_monotonic_increasing(self) -> bool:\n \n from pandas import Index\n\n return Index(self).is_monotonic_increasing\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 31, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 11, + "token_counts": 19, + "n_ast_nodes": 34, + "n_identifiers": 5, + "d_id": 39484, + "documentation": { + "docstring": "\n Return boolean if values in the object are\n monotonic_increasing.\n\n Returns\n -------\n bool\n ", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 55, + "language": "en" + } + }, + { + "id": 320623, + "commit_id": "9c4169c7b7d96a10012a72c70fc38c6154f7481f", + "repo": "qutebrowser", + "path": "tests/conftest.py", + "file_name": "conftest.py", + "fun_name": "check_yaml_c_exts", + "commit_message": "tests: Remove some unused imports", + "code": "def check_yaml_c_exts():\n \n if testutils.ON_CI and sys.version_info[:2] != (3, 10):\n from yaml import CLoader # pylint: disable=unused-import\n\n\n@pytest.hookimpl(tryfirst=True, hookwrapper=True)", + "url": "https://github.com/qutebrowser/qutebrowser.git", + "language": "Python", + "ast_errors": "@pytest.hookimpl(tryfirst=True, hookwrapper=True)", + "n_ast_errors": 1, + "ast_levels": 9, + "n_whitespaces": 31, + "n_words": 18, + "vocab_size": 18, + "complexity": 3, + "nloc": 3, + "token_counts": 28, + "n_ast_nodes": 68, + "n_identifiers": 11, + "d_id": 117234, + "documentation": { + "docstring": "Make sure PyYAML C extensions are available on CI.\n\n Not available yet with a nightly Python, see:\n https://github.com/yaml/pyyaml/issues/416\n ", + "n_words": 18, + "vocab_size": 17, + "n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 276584, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/tests/temporal_sample_weights_correctness_test.py", + "file_name": "temporal_sample_weights_correctness_test.py", + "fun_name": "custom_generator_multi_io_temporal", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def custom_generator_multi_io_temporal(self, sample_weights=None):\n \n batch_size = 3\n num_samples = 3\n iteration = 0\n while True:\n batch_index = iteration * batch_size % num_samples\n iteration += 1\n start = batch_index\n end = start + batch_size\n x = [self.x[start:end], self.x[start:end]]\n y = [self.y1[start:end], self.y2[start:end]]\n if sample_weights:\n sw = tf.nest.map_structure(\n lambda w: w[start:end], sample_weights\n )\n else:\n sw = None\n yield x, y, sw\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 256, + "n_words": 58, + "vocab_size": 39, + "complexity": 3, + "nloc": 18, + "token_counts": 116, + "n_ast_nodes": 180, + 
"n_identifiers": 18, + "d_id": 81689, + "documentation": { + "docstring": "Generator for getting data for temporal multi io model.\n\n Args:\n sample_weights: List of sample_weights.\n\n Yields:\n Tuple of inputs, label, sample weights data.\n ", + "n_words": 22, + "vocab_size": 20, + "n_whitespaces": 61, + "language": "en" + } + }, + { + "id": 204321, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/contrib/sitemaps/__init__.py", + "file_name": "__init__.py", + "fun_name": "ping_google", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def ping_google(sitemap_url=None, ping_url=PING_URL, sitemap_uses_https=True):\n \n sitemap_full_url = _get_sitemap_full_url(sitemap_url, sitemap_uses_https)\n params = urlencode({\"sitemap\": sitemap_full_url})\n urlopen(\"%s?%s\" % (ping_url, params))\n\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 28, + "n_words": 16, + "vocab_size": 15, + "complexity": 1, + "nloc": 4, + "token_counts": 44, + "n_ast_nodes": 73, + "n_identifiers": 10, + "d_id": 50695, + "documentation": { + "docstring": "\n Alert Google that the sitemap for the current site has been updated.\n If sitemap_url is provided, it should be an absolute path to the sitemap\n for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this\n function will attempt to deduce it by using urls.reverse().\n ", + "n_words": 46, + "vocab_size": 34, + "n_whitespaces": 62, + "language": "en" + } + }, + { + "id": 292434, + "commit_id": "137793c06748b3914ae4906c9d11599dbd83d1fd", + "repo": "core", + "path": "tests/components/picnic/test_sensor.py", + "file_name": "test_sensor.py", + "fun_name": "test_sensors_no_data", + "commit_message": "Add sensors for next Picnic deliveries (#66474)", + "code": "async def test_sensors_no_data(self):\n \n # Setup platform with default responses\n await self._setup_platform(use_default_responses=True)\n\n # Change mock responses to empty data and refresh the coordinator\n self.picnic_mock().get_user.return_value = {}\n self.picnic_mock().get_cart.return_value = None\n self.picnic_mock().get_deliveries.return_value = None\n self.picnic_mock().get_delivery_position.side_effect = ValueError\n await self._coordinator.async_refresh()\n\n # Assert all default-enabled sensors have STATE_UNAVAILABLE because the last update failed\n assert self._coordinator.last_update_success is False\n self._assert_sensor(\"sensor.picnic_cart_total_price\", STATE_UNAVAILABLE)\n self._assert_sensor(\"sensor.picnic_selected_slot_start\", STATE_UNAVAILABLE)\n self._assert_sensor(\"sensor.picnic_selected_slot_end\", STATE_UNAVAILABLE)\n self._assert_sensor(\n \"sensor.picnic_selected_slot_max_order_time\", STATE_UNAVAILABLE\n )\n self._assert_sensor(\n \"sensor.picnic_selected_slot_min_order_value\", STATE_UNAVAILABLE\n )\n self._assert_sensor(\n \"sensor.picnic_last_order_max_order_time\", STATE_UNAVAILABLE\n )\n self._assert_sensor(\"sensor.picnic_last_order_delivery_time\", STATE_UNAVAILABLE)\n self._assert_sensor(\"sensor.picnic_next_delivery_eta_start\", STATE_UNAVAILABLE)\n self._assert_sensor(\"sensor.picnic_next_delivery_eta_end\", STATE_UNAVAILABLE)\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 270, + "n_words": 76, + "vocab_size": 55, + "complexity": 1, + "nloc": 23, + 
"token_counts": 148, + "n_ast_nodes": 258, + "n_identifiers": 17, + "d_id": 91520, + "documentation": { + "docstring": "Test sensor states when the api only returns empty objects.", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 204814, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/db/backends/base/base.py", + "file_name": "base.py", + "fun_name": "get_rollback", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def get_rollback(self):\n \n if not self.in_atomic_block:\n raise TransactionManagementError(\n \"The rollback flag doesn't work outside of an 'atomic' block.\"\n )\n return self.needs_rollback\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 78, + "n_words": 20, + "vocab_size": 20, + "complexity": 2, + "nloc": 6, + "token_counts": 21, + "n_ast_nodes": 39, + "n_identifiers": 5, + "d_id": 50899, + "documentation": { + "docstring": "Get the \"needs rollback\" flag -- for *advanced use* only.", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 221103, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/bdb.py", + "file_name": "bdb.py", + "fun_name": "get_file_breaks", + "commit_message": "add python 3.10.4 for windows", + "code": "def get_file_breaks(self, filename):\n \n filename = self.canonic(filename)\n if filename in self.breaks:\n return self.breaks[filename]\n else:\n return []\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 65, + "n_words": 15, + "vocab_size": 13, + "complexity": 2, + "nloc": 6, + "token_counts": 35, + "n_ast_nodes": 58, + "n_identifiers": 5, + "d_id": 56206, + "documentation": { + "docstring": "Return all lines with breakpoints for filename.\n\n If no breakpoints are set, return an empty list.\n ", + "n_words": 16, + "vocab_size": 15, + "n_whitespaces": 30, + "language": "en" + } + }, + { + "id": 264301, + "commit_id": "54834c47f8870e7faabcd847c3270da0bd3d2884", + "repo": "netbox", + "path": "netbox/netbox/views/generic/object_views.py", + "file_name": "object_views.py", + "fun_name": "get", + "commit_message": "Refactor generic views; add plugins dev documentation", + "code": "def get(self, request, **kwargs):\n \n instance = self.get_object(**kwargs)\n\n return render(request, self.get_template_name(), {\n 'object': instance,\n **self.get_extra_context(request, instance),\n })\n\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 66, + "n_words": 16, + "vocab_size": 16, + "complexity": 1, + "nloc": 6, + "token_counts": 48, + "n_ast_nodes": 76, + "n_identifiers": 9, + "d_id": 77673, + "documentation": { + "docstring": "\n GET request handler. 
`*args` and `**kwargs` are passed to identify the object being queried.\n\n Args:\n request: The current request\n ", + "n_words": 19, + "vocab_size": 18, + "n_whitespaces": 52, + "language": "en" + } + }, + { + "id": 197261, + "commit_id": "c52d6ce2c8d5eecc5b891d05b1c32a456c4cd308", + "repo": "sympy", + "path": "sympy/polys/densetools.py", + "file_name": "densetools.py", + "fun_name": "dup_eval", + "commit_message": "fixed error in primitive_element for some case when ext=False", + "code": "def dup_eval(f, a, K):\n \n result = K.zero\n\n for c in f:\n result *= a\n result += c\n\n return result\n\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 45, + "n_words": 19, + "vocab_size": 15, + "complexity": 3, + "nloc": 8, + "token_counts": 40, + "n_ast_nodes": 45, + "n_identifiers": 7, + "d_id": 48415, + "documentation": { + "docstring": "\n Evaluate a polynomial at ``x = a`` in ``K[x]`` using Horner scheme.\n\n Examples\n ========\n\n >>> from sympy.polys import ring, ZZ\n >>> R, x = ring(\"x\", ZZ)\n\n >>> R.dup_eval(x**2 + 2*x + 3, 2)\n 11\n\n ", + "n_words": 34, + "vocab_size": 30, + "n_whitespaces": 59, + "language": "en" + } + }, + { + "id": 196116, + "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", + "repo": "sympy", + "path": "sympy/combinatorics/pc_groups.py", + "file_name": "pc_groups.py", + "fun_name": "map_relation", + "commit_message": "Updated import locations", + "code": "def map_relation(self, w):\n \n array = w.array_form\n s1 = array[0][0]\n s2 = array[1][0]\n key = ((s2, -1), (s1, 1), (s2, 1))\n key = self.free_group.dtype(key)\n return self.pc_presentation[key]\n\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 74, + "n_words": 25, + "vocab_size": 20, + "complexity": 1, + "nloc": 7, + "token_counts": 70, + "n_ast_nodes": 104, + "n_identifiers": 11, + "d_id": 47616, + "documentation": { + "docstring": "\n Return a conjugate relation.\n\n Explanation\n ===========\n\n Given a word formed by two free group elements, the\n corresponding conjugate relation with those free\n group elements is formed and mapped with the collected\n word in the polycyclic presentation.\n\n Examples\n ========\n\n >>> from sympy.combinatorics.named_groups import SymmetricGroup\n >>> from sympy.combinatorics import free_group\n >>> G = SymmetricGroup(3)\n >>> PcGroup = G.polycyclic_group()\n >>> collector = PcGroup.collector\n >>> F, x0, x1 = free_group(\"x0, x1\")\n >>> w = x1*x0\n >>> collector.map_relation(w)\n x1**2\n\n See Also\n ========\n\n pc_presentation\n\n ", + "n_words": 78, + "vocab_size": 55, + "n_whitespaces": 233, + "language": "en" + } + }, + { + "id": 265988, + "commit_id": "484efdaf75f267a43f9321b938fda1bc967b9e53", + "repo": "netbox", + "path": "netbox/utilities/templatetags/helpers.py", + "file_name": "helpers.py", + "fun_name": "applied_filters", + "commit_message": "Closes #9623: Implement saved filters (#10801)\n\n* Initial work on saved filters\r\n\r\n* Return only enabled/shared filters\r\n\r\n* Add tests\r\n\r\n* Clean up filtering of usable SavedFilters", + "code": "def applied_filters(context, model, form, query_params):\n \n user = context['request'].user\n form.is_valid() # Ensure cleaned_data has been set\n\n applied_filters = []\n for filter_name in form.changed_data:\n if filter_name not in form.cleaned_data:\n continue\n\n querydict = 
query_params.copy()\n if filter_name not in querydict:\n continue\n\n bound_field = form.fields[filter_name].get_bound_field(form, filter_name)\n querydict.pop(filter_name)\n display_value = ', '.join([str(v) for v in get_selected_values(form, filter_name)])\n\n applied_filters.append({\n 'name': filter_name,\n 'value': form.cleaned_data[filter_name],\n 'link_url': f'?{querydict.urlencode()}',\n 'link_text': f'{bound_field.label}: {display_value}',\n })\n\n save_link = None\n if user.has_perm('extras.add_savedfilter') and 'filter' not in context['request'].GET:\n content_type = ContentType.objects.get_for_model(model).pk\n parameters = context['request'].GET.urlencode()\n url = reverse('extras:savedfilter_add')\n save_link = f\"{url}?content_types={content_type}¶meters={quote(parameters)}\"\n\n return {\n 'applied_filters': applied_filters,\n 'save_link': save_link,\n }\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 283, + "n_words": 91, + "vocab_size": 69, + "complexity": 7, + "nloc": 29, + "token_counts": 198, + "n_ast_nodes": 370, + "n_identifiers": 36, + "d_id": 78255, + "documentation": { + "docstring": "\n Display the active filters for a given filter form.\n ", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 16, + "language": "en" + } + }, + { + "id": 320783, + "commit_id": "a20bb67a878b2e68abf8268c1b0a27f018d01352", + "repo": "qutebrowser", + "path": "qutebrowser/keyinput/keyutils.py", + "file_name": "keyutils.py", + "fun_name": "text", + "commit_message": "mypy: Upgrade to PyQt5-stubs 5.15.6.0\n\nFor some unknown reason, those new stubs cause a *lot* of things now to be\nchecked by mypy which formerly probably got skipped due to Any being implied\nsomewhere.\n\nThe stubs themselves mainly improved, with a couple of regressions too.\n\nIn total, there were some 337 (!) new mypy errors. This commit fixes almost all\nof them, and the next commit improves a fix to get things down to 0 errors\nagain.\n\nOverview of the changes:\n\n==== qutebrowser/app.py\n\n- Drop type ignore due to improved stubs.\n\n==== qutebrowser/browser/browsertab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n This is debatable: I suppose the abstract stuff shouldn't need to know\n anything about the concrete backends at all. But it seems like we cut some\n corners when initially implementing things, and put some code in browsertab.py\n just because the APIs of both backends happened to be compatible. Perhaps\n something to reconsider once we drop QtWebKit and hopefully implement a dummy\n backend.\n\n- Add an additional assertion in AbstractAction.run_string. This is already\n covered by the isinstance(member, self.action_base) above it, but that's too\n dynamic for mypy to understand.\n\n- Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x\n and y components), not a single int.\n\n- Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x\n and y components), not a single int.\n\n- Fix the argument types of AbstractScroller.to_perc, as it's possible to pass\n fractional percentages too.\n\n- Specify the type for AbstractHistoryPrivate._history. 
See above (_widget) re\n this being debatable.\n\n- Fix the return type of AbstractTabPrivate.event_target(), which can be None\n (see #3888).\n\n- Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS\n return value), not None.\n\n- Fix the argument type for AbstractTabPrivate.toggle_inspector: position can\n be None to use the last used position.\n\n- Declare the type of sub-objects of AbstractTab.\n\n- Fix the return value of AbstractTab.icon(), which is the QIcon, not None.\n\n==== qutebrowser/browser/commands.py\n\n- Make sure the active window is a MainWindow (with a .win_id attribute).\n\n==== qutebrowser/browser/downloadview.py\n\n- Add _model() which makes sure that self.model() is a DownloadModel, not None\n or any other model. This is needed because other methods access a variety of\n custom attributes on it, e.g. last_index().\n\n==== qutebrowser/browser/greasemonkey.py\n\n- Add an ignore for AbstractDownload.requested_url which we patch onto the\n downloads. Probably would be nicer to add it as a proper attribute which always\n gets set by the DownloadManager.\n\n==== qutebrowser/browser/hints.py\n\n- Remove type ignores for QUrl.toString().\n- Add a new type ignore for combining different URL flags (which works, but is\n not exactly type safe... still probably a regression in the stubs).\n- Make sure the things we get back from self._get_keyparser are what we actually\n expect. Probably should introduce a TypedDict (and/or overloads for\n _get_keyparser with typing.Literal) to teach mypy about the exact return value.\n See #7098.\n This is needed because we access Hint/NormalKeyParser-specific attributes such\n as .set_inhibited_timout() or .update_bindings().\n\n==== qutebrowser/browser/inspector.py\n\n- Similar changes than in browsertab.py to make some types where we share API\n (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next\n commit.\n\n==== qutebrowser/browser/network/pac.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/qtnetworkdownloads.py\n\n- Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an\n AbstractDownload), so that we can call ._uses_nam() on it.\n\n==== qutebrowser/browser/qutescheme.py\n\n- Remove now unneeded type ignore for QUrl flags.\n\n==== qutebrowser/browser/urlmarks.py\n\n- Specify the type of UrlMarkManager._lineparser, as those only get initialized\n in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist.\n\n==== qutebrowser/browser/webelem.py\n\n- New casts to turn single KeyboardModifier (enum) entries into\n KeyboardModifiers (flags). 
Might not be needed anymore with Qt 6.\n- With that, casting the final value is now unneeded.\n\n==== qutebrowser/browser/webengine/notification.py\n\n- Remove now unneeded type ignore for signal.\n- Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished()\n is a QProcess, not just any QObject.\n\n==== qutebrowser/browser/webengine/webenginedownloads.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webengine/webengineelem.py\n\n- Specify the type of WebEngineElement._tab.\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webengineinspector.py\n\n- See changes to inspector.py and next commit.\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/webengine/webenginequtescheme.py\n\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webenginesettings.py\n\n- Ignore access of .setter attribute which we patch onto QWebEngineProfile.\n Would be nice to have a subclass or wrapper-class instead.\n\n==== qutebrowser/browser/webengine/webenginetab.py\n\n- Specified the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Remove some now-unneeded type ignores for creating FindFlags.\n- Specify more concrete types for WebEngineTab members where we actually need to\n access WebEngine-specific attributes.\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webengine/webview.py\n\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webkit/network/networkreply.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webkit/webkitinspector.py\n\n- See changes to inspector.py and next commit.\n\n==== qutebrowser/browser/webkit/webkittab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Add a type ignore for WebKitAction because our workaround needs to\n treat them as ints (which is allowed by PyQt, even if not type-safe).\n- Add new ignores for findText calls: The text is a QString and can be None; the\n flags are valid despite mypy thinking they aren't (stubs regression?).\n- Specify the type for WebKitHistoryPrivate._history, because we access\n WebKit-specific attributes. See above (_widget) re this being debatable.\n- Make mypy aware that .currentFrame() and .frameAt() can return None (stubs\n regression?).\n- Make sure the .page() and .page().networkAccessManager() are our subclasses\n rather than the more generic QtWebKit objects, as we use custom attributes.\n- Add new type ignores for signals (stubs regression!)\n\n==== qutebrowser/browser/webkit/webpage.py\n\n- Make sure the .networkAccessManager() is our subclass rather than the more\n generic QtWebKit object, as we use custom attributes.\n- Replace a cast by a type ignore. 
The cast didn't work anymore.\n\n==== qutebrowser/browser/webkit/webview.py\n\n- Make sure the .page() is our subclass rather than the more generic QtWebKit\n object, as we use custom attributes.\n\n==== qutebrowser/commands/userscripts.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/completion/completer.py\n\n- Add a new _completion() getter (which ensures it actually gets the completion\n view) rather than accessing the .parent() directly (which could be any QObject).\n\n==== qutebrowser/completion/completiondelegate.py\n\n- Make sure self.parent() is a CompletionView (no helper method as there is only\n one instance).\n- Remove a now-unneeded type ignore for adding QSizes.\n\n==== qutebrowser/completion/completionwidget.py\n\n- Add a ._model() getter which ensures that we get a CompletionModel (with\n custom attributes) rather than Qt's .model() which can be any QAbstractItemModel\n (or None).\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/completion/models/completionmodel.py\n\n- Remove now unneeded type ignores for signals.\n- Ignore a complaint about .set_pattern() not being defined. Completion\n categories don't share any common parent class, so it would be good to introduce\n a typing.Protocol for this. See #7098.\n\n==== qutebrowser/components/misccommands.py\n\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/components/readlinecommands.py\n\n- Make sure QApplication.instance() is a QApplication (and not just a\n QCoreApplication). This includes the former \"not None\" check.\n\n==== qutebrowser/components/scrollcommands.py\n\n- Add basic annotation for \"funcs\" dict. Could have a callable protocol to\n specify it needs a count kwarg, see #7098.\n\n==== qutebrowser/config/stylesheet.py\n\n- Correctly specify that stylesheet apply to QWidgets, not any QObject.\n- Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy\n about this with overloads and protocols (stylesheet for set_register being None\n => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not\n worth the troble. See #7098.\n\n==== qutebrowser/keyinput/keyutils.py\n\n- Remove some now-unneeded type ignores and add a cast for using a single enum\n value as flags. Might need to look at this again with Qt 6 support.\n\n==== qutebrowser/keyinput/modeman.py\n\n- Add a FIXME for using a TypedDict, see comments for hints.py above.\n\n==== qutebrowser/mainwindow/mainwindow.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n- Improve where we cast from WindowType to WindowFlags, no int needed\n- Use new .tab_bar() getter, see below.\n\n==== qutebrowser/mainwindow/prompt.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n\n==== qutebrowser/mainwindow/statusbar/bar.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/statusbar/command.py\n\n- Fix type for setText() override (from QLineEdit): text can be None\n (QString in C++).\n\n==== qutebrowser/mainwindow/statusbar/url.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/tabbedbrowser.py\n\n- Specify that TabDeque manages browser tabs, not any QWidgets. 
It accesses\n AbstractTab-specific attributes.\n- Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access\n .maybe_hide.\n- Fix the annotations for stored marks: Scroll positions are a QPoint, not int.\n- Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and\n .widget(), which ensures that the return values are valid AbstractTabs (or None\n for _tab_by_idx). This is needed because we access AbstractTab-specific\n attributes.\n- For some places, where the tab can be None, continue using .currentTab() but\n add asserts.\n- Remove some now-unneeded [unreachable] ignores, as mypy knows about the None\n possibility now.\n\n==== qutebrowser/mainwindow/tabwidget.py\n\n- Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and\n .widget() are of type TabBar and AbstractTab, respectively.\n- Add additional assertions where we expect ._tab_by_idx() to never be None.\n- Remove dead code in get_tab_fields for handling a None y scroll position. I\n was unable to find any place in the code where this could be set to None.\n- Remove some now-unneeded type ignores and casts, as mypy now knows that\n _type_by_idx() could be None.\n- Work around a strange instance where mypy complains about not being able to\n find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility,\n despite it clearly being shown as a bool *inside* that class without any\n annotation.\n- Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in\n fact a TabWidget.\n\n==== qutebrowser/misc/crashsignal.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/editor.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/ipc.py\n\n- Remove now unneeded type ignores for signals.\n- Add new type ignores for .error() which is both a signal and a getter\n (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was\n renamed to errorOccurred in 5.15.\n\n==== qutebrowser/misc/objects.py\n\n- Make sure mypy knows that objects.app is our custom Application (with custom\n attributes) rather than any QApplication.\n\n==== qutebrowser/utils/objreg.py\n\n- Ignore attr-defined for .win_id attributes. 
Maybe could add a typing.Protocol,\n but ideally, the whole objreg stuff should die one day anyways.\n\n==== tests/unit/completion/test_completer.py\n\n- Make CompletionWidgetStub inherit from CompletionView so that it passes the\n new isinstance() asserts in completer.py (see above).", + "code": "def text(self) -> str:\n \n control = {\n Qt.Key_Space: ' ',\n Qt.Key_Tab: '\\t',\n Qt.Key_Backspace: '\\b',\n Qt.Key_Return: '\\r',\n Qt.Key_Enter: '\\r',\n Qt.Key_Escape: '\\x1b',\n }\n\n if self.key in control:\n return control[self.key]\n elif not _is_printable(self.key):\n return ''\n\n text = QKeySequence(self.key).toString()\n if not self.modifiers & Qt.ShiftModifier:\n text = text.lower()\n return text\n", + "url": "https://github.com/qutebrowser/qutebrowser.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 200, + "n_words": 45, + "vocab_size": 36, + "complexity": 4, + "nloc": 18, + "token_counts": 104, + "n_ast_nodes": 180, + "n_identifiers": 18, + "d_id": 117350, + "documentation": { + "docstring": "Get the text which would be displayed when pressing this key.", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 147843, + "commit_id": "00922817b66ee14ba215972a98f416f3d6fef1ba", + "repo": "ray", + "path": "rllib/execution/train_ops.py", + "file_name": "train_ops.py", + "fun_name": "train_one_step", + "commit_message": "[RLlib] Rewrite PPO to use training_iteration + enable DD-PPO for Win32. (#23673)", + "code": "def train_one_step(trainer, train_batch, policies_to_train=None) -> Dict:\n \n\n config = trainer.config\n workers = trainer.workers\n local_worker = workers.local_worker()\n num_sgd_iter = config.get(\"num_sgd_iter\", 1)\n sgd_minibatch_size = config.get(\"sgd_minibatch_size\", 0)\n\n learn_timer = trainer._timers[LEARN_ON_BATCH_TIMER]\n with learn_timer:\n # Subsample minibatches (size=`sgd_minibatch_size`) from the\n # train batch and loop through train batch `num_sgd_iter` times.\n if num_sgd_iter > 1 or sgd_minibatch_size > 0:\n info = do_minibatch_sgd(\n train_batch,\n {\n pid: local_worker.get_policy(pid)\n for pid in policies_to_train\n or local_worker.get_policies_to_train(train_batch)\n },\n local_worker,\n num_sgd_iter,\n sgd_minibatch_size,\n [],\n )\n # Single update step using train batch.\n else:\n info = local_worker.learn_on_batch(train_batch)\n\n learn_timer.push_units_processed(train_batch.count)\n trainer._counters[NUM_ENV_STEPS_TRAINED] += train_batch.count\n trainer._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps()\n\n return info\n\n\n@DeveloperAPI", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "@DeveloperAPI", + "n_ast_errors": 1, + "ast_levels": 17, + "n_whitespaces": 357, + "n_words": 92, + "vocab_size": 72, + "complexity": 5, + "nloc": 40, + "token_counts": 151, + "n_ast_nodes": 245, + "n_identifiers": 27, + "d_id": 34101, + "documentation": { + "docstring": "Function that improves the all policies in `train_batch` on the local worker.\n\n Examples:\n >>> from ray.rllib.execution.rollout_ops import synchronous_parallel_sample\n >>> trainer = [...] 
# doctest: +SKIP\n >>> train_batch = synchronous_parallel_sample(trainer.workers) # doctest: +SKIP\n >>> # This trains the policy on one batch.\n >>> results = train_one_step(trainer, train_batch)) # doctest: +SKIP\n {\"default_policy\": ...}\n\n Updates the NUM_ENV_STEPS_TRAINED and NUM_AGENT_STEPS_TRAINED counters as well as\n the LEARN_ON_BATCH_TIMER timer of the `trainer` object.\n ", + "n_words": 67, + "vocab_size": 47, + "n_whitespaces": 121, + "language": "en" + } + }, + { + "id": 197360, + "commit_id": "65be461082dda54c8748922f9c29a19af1279fe1", + "repo": "sympy", + "path": "sympy/solvers/deutils.py", + "file_name": "deutils.py", + "fun_name": "_preprocess", + "commit_message": "Remove abbreviations in documentation", + "code": "def _preprocess(expr, func=None, hint='_Integral'):\n \n if isinstance(expr, Pow):\n # if f(x)**p=0 then f(x)=0 (p>0)\n if (expr.exp).is_positive:\n expr = expr.base\n derivs = expr.atoms(Derivative)\n if not func:\n funcs = set().union(*[d.atoms(AppliedUndef) for d in derivs])\n if len(funcs) != 1:\n raise ValueError('The function cannot be '\n 'automatically detected for %s.' % expr)\n func = funcs.pop()\n fvars = set(func.args)\n if hint is None:\n return expr, func\n reps = [(d, d.doit()) for d in derivs if not hint.endswith('_Integral') or\n d.has(func) or set(d.variables) & fvars]\n eq = expr.subs(reps)\n return eq, func\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 200, + "n_words": 83, + "vocab_size": 61, + "complexity": 11, + "nloc": 18, + "token_counts": 163, + "n_ast_nodes": 268, + "n_identifiers": 29, + "d_id": 48503, + "documentation": { + "docstring": "Prepare expr for solving by making sure that differentiation\n is done so that only func remains in unevaluated derivatives and\n (if hint does not end with _Integral) that doit is applied to all\n other derivatives. If hint is None, do not do any differentiation.\n (Currently this may cause some simple differential equations to\n fail.)\n\n In case func is None, an attempt will be made to autodetect the\n function to be solved for.\n\n >>> from sympy.solvers.deutils import _preprocess\n >>> from sympy import Derivative, Function\n >>> from sympy.abc import x, y, z\n >>> f, g = map(Function, 'fg')\n\n If f(x)**p == 0 and p>0 then we can solve for f(x)=0\n >>> _preprocess((f(x).diff(x)-4)**5, f(x))\n (Derivative(f(x), x) - 4, f(x))\n\n Apply doit to derivatives that contain more than the function\n of interest:\n\n >>> _preprocess(Derivative(f(x) + x, x))\n (Derivative(f(x), x) + 1, f(x))\n\n Do others if the differentiation variable(s) intersect with those\n of the function of interest or contain the function of interest:\n\n >>> _preprocess(Derivative(g(x), y, z), f(y))\n (0, f(y))\n >>> _preprocess(Derivative(f(y), z), f(y))\n (0, f(y))\n\n Do others if the hint does not end in '_Integral' (the default\n assumes that it does):\n\n >>> _preprocess(Derivative(g(x), y), f(x))\n (Derivative(g(x), y), f(x))\n >>> _preprocess(Derivative(f(x), y), f(x), hint='')\n (0, f(x))\n\n Do not do any derivatives if hint is None:\n\n >>> eq = Derivative(f(x) + 1, x) + Derivative(f(x), y)\n >>> _preprocess(eq, f(x), hint=None)\n (Derivative(f(x) + 1, x) + Derivative(f(x), y), f(x))\n\n If it's not clear what the function of interest is, it must be given:\n\n >>> eq = Derivative(f(x) + g(x), x)\n >>> _preprocess(eq, g(x))\n (Derivative(f(x), x) + Derivative(g(x), x), g(x))\n >>> try: _preprocess(eq)\n ... 
except ValueError: print(\"A ValueError was raised.\")\n A ValueError was raised.\n\n ", + "n_words": 276, + "vocab_size": 153, + "n_whitespaces": 402, + "language": "en" + } + }, + { + "id": 266301, + "commit_id": "80ced6b782e15179e7f35f0ef6737a65ddd60f92", + "repo": "netbox", + "path": "netbox/utilities/forms/forms.py", + "file_name": "forms.py", + "fun_name": "_clean_yaml", + "commit_message": "Closes #11163: Auto-detect data format during bulk import", + "code": "def _clean_yaml(self, data):\n \n records = []\n try:\n for data in yaml.load_all(data, Loader=yaml.SafeLoader):\n if type(data) == list:\n records.extend(data)\n elif type(data) == dict:\n records.append(data)\n else:\n raise forms.ValidationError({\n self.data_field: _(\n \"Invalid YAML data. Data must be in the form of multiple documents, or a single document \"\n \"comprising a list of dictionaries.\"\n )\n })\n except yaml.error.YAMLError as err:\n raise forms.ValidationError({\n self.data_field: f\"Invalid YAML data: {err}\"\n })\n\n return records\n\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 20, + "n_whitespaces": 369, + "n_words": 65, + "vocab_size": 54, + "complexity": 5, + "nloc": 20, + "token_counts": 102, + "n_ast_nodes": 174, + "n_identifiers": 20, + "d_id": 78362, + "documentation": { + "docstring": "\n Clean YAML-formatted data. Data must be either\n a) A single document comprising a list of dictionaries (each representing an object), or\n b) Multiple documents, separated with the '---' token\n ", + "n_words": 29, + "vocab_size": 29, + "n_whitespaces": 62, + "language": "en" + } + }, + { + "id": 246053, + "commit_id": "18862f20b5495bdc556c54e92fd4b1efdc718ba7", + "repo": "synapse", + "path": "tests/rest/admin/test_user.py", + "file_name": "test_user.py", + "fun_name": "test_requester_is_no_admin", + "commit_message": "Remove the 'password_hash' from the Users Admin API endpoint response dictionary (#11576)", + "code": "def test_requester_is_no_admin(self):\n \n url = self.url_prefix % \"@bob:test\"\n\n channel = self.make_request(\n \"GET\",\n url,\n access_token=self.other_user_token,\n )\n\n self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)\n self.assertEqual(\"You are not a server admin\", channel.json_body[\"error\"])\n\n channel = self.make_request(\n \"PUT\",\n url,\n access_token=self.other_user_token,\n content=b\"{}\",\n )\n\n self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)\n self.assertEqual(\"You are not a server admin\", channel.json_body[\"error\"])\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 189, + "n_words": 42, + "vocab_size": 25, + "complexity": 1, + "nloc": 17, + "token_counts": 114, + "n_ast_nodes": 183, + "n_identifiers": 15, + "d_id": 70969, + "documentation": { + "docstring": "\n If the user is not a server admin, an error is returned.\n ", + "n_words": 12, + "vocab_size": 11, + "n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 261218, + "commit_id": "537c325f2927895449ce418b3a77750135c0ba7b", + "repo": "scikit-learn", + "path": "sklearn/utils/__init__.py", + "file_name": "__init__.py", + "fun_name": "axis0_safe_slice", + "commit_message": "DOC Ensure that sklearn.utils.axis0_safe_slice passes numpydoc (#24561)", + "code": "def axis0_safe_slice(X, mask, len_mask):\n \n if len_mask != 0:\n return X[safe_mask(X, 
mask), :]\n return np.zeros(shape=(0, X.shape[1]))\n\n", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 31, + "n_words": 15, + "vocab_size": 14, + "complexity": 2, + "nloc": 4, + "token_counts": 45, + "n_ast_nodes": 68, + "n_identifiers": 8, + "d_id": 76691, + "documentation": { + "docstring": "Return a mask which is safer to use on X than safe_mask.\n\n This mask is safer than safe_mask since it returns an\n empty array, when a sparse matrix is sliced with a boolean mask\n with all False, instead of raising an unhelpful error in older\n versions of SciPy.\n\n See: https://github.com/scipy/scipy/issues/5361\n\n Also note that we can avoid doing the dot product by checking if\n the len_mask is not zero in _huber_loss_and_gradient but this\n is not going to be the bottleneck, since the number of outliers\n and non_outliers are typically non-zero and it makes the code\n tougher to follow.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n Data on which to apply mask.\n\n mask : ndarray\n Mask to be used on X.\n\n len_mask : int\n The length of the mask.\n\n Returns\n -------\n mask : ndarray\n Array that is safe to use on X.\n ", + "n_words": 140, + "vocab_size": 91, + "n_whitespaces": 225, + "language": "en" + } + }, + { + "id": 780, + "commit_id": "b61c1fc4b83fc740d3d9d0d84d0ca6022a3c49bb", + "repo": "PySyft", + "path": "packages/syft/tests/syft/core/io/location/specific_test.py", + "file_name": "specific_test.py", + "fun_name": "test_binary_serialization", + "commit_message": "Refactored store interface to eliminate confusion with __getitem__\n\n- Fixed serde tests effected by protobuf magic bytes", + "code": "def test_binary_serialization() -> None:\n \n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n assert sy.serialize(obj, to_bytes=True) == blob_bytes\n\n", + "url": "https://github.com/OpenMined/PySyft.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 28, + "n_words": 16, + "vocab_size": 15, + "complexity": 1, + "nloc": 5, + "token_counts": 47, + "n_ast_nodes": 78, + "n_identifiers": 15, + "d_id": 118, + "documentation": { + "docstring": "Tests that binary SpecificLocation serializes as expected", + "n_words": 7, + "vocab_size": 7, + "n_whitespaces": 6, + "language": "en" + } + }, + { + "id": 196400, + "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", + "repo": "sympy", + "path": "sympy/matrices/repmatrix.py", + "file_name": "repmatrix.py", + "fun_name": "col_op", + "commit_message": "Moved imports to higher level", + "code": "def col_op(self, j, f):\n \n for i in range(self.rows):\n self[i, j] = f(self[i, j], i)\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 39, + "n_words": 14, + "vocab_size": 14, + "complexity": 2, + "nloc": 3, + "token_counts": 38, + "n_ast_nodes": 56, + "n_identifiers": 7, + "d_id": 47900, + "documentation": { + "docstring": "In-place operation on col j using two-arg functor whose args are\n interpreted as (self[i, j], i).\n\n Examples\n ========\n\n >>> from sympy import eye\n >>> M = eye(3)\n >>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M\n Matrix([\n [1, 2, 0],\n [0, 1, 0],\n [0, 0, 1]])\n\n See Also\n ========\n col\n row_op\n ", + "n_words": 52, + "vocab_size": 45, + "n_whitespaces": 
157, + "language": "en" + } + }, + { + "id": 269566, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/backend.py", + "file_name": "backend.py", + "fun_name": "update_sub", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def update_sub(x, decrement):\n \n return tf.compat.v1.assign_sub(x, decrement)\n\n\n@keras_export(\"keras.backend.moving_average_update\")\n@doc_controls.do_not_generate_docs", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "@keras_export(\"keras.backend.moving_average_update\")\n@doc_controls.do_not_generate_docs", + "n_ast_errors": 1, + "ast_levels": 9, + "n_whitespaces": 12, + "n_words": 8, + "vocab_size": 8, + "complexity": 1, + "nloc": 2, + "token_counts": 21, + "n_ast_nodes": 51, + "n_identifiers": 10, + "d_id": 80190, + "documentation": { + "docstring": "Update the value of `x` by subtracting `decrement`.\n\n Args:\n x: A Variable.\n decrement: A tensor of same shape as `x`.\n\n Returns:\n The variable `x` updated.\n ", + "n_words": 25, + "vocab_size": 22, + "n_whitespaces": 55, + "language": "en" + } + }, + { + "id": 271241, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/engine/functional.py", + "file_name": "functional.py", + "fun_name": "_insert_layers", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def _insert_layers(self, layers, relevant_nodes=None):\n \n layers = tf.nest.flatten(layers)\n tf_utils.assert_no_legacy_layers(layers)\n node_to_depth = {}\n for depth, nodes in self._nodes_by_depth.items():\n node_to_depth.update({node: depth for node in nodes})\n # The nodes of these Layers that are relevant to this Network. If not\n # provided, assume all Nodes are relevant\n if not relevant_nodes:\n relevant_nodes = tf.nest.flatten(\n [layer._inbound_nodes for layer in layers]\n )\n network_nodes = set(relevant_nodes + list(node_to_depth.keys()))\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 171, + "n_words": 60, + "vocab_size": 48, + "complexity": 11, + "nloc": 44, + "token_counts": 275, + "n_ast_nodes": 155, + "n_identifiers": 22, + "d_id": 80733, + "documentation": { + "docstring": "Inserts Layers into the Network after Network creation.\n\n This is only valid for Keras Graph Networks. Layers added via this function\n will be included in the `call` computation and `get_config` of this Network.\n They will not be added to the Network's outputs.\n\n Args:\n layers: Arbitrary nested structure of Layers. Layers must be reachable\n from one or more of the `keras.Input` Tensors that correspond to this\n Network's inputs.\n relevant_nodes: Nodes from the Layers that should be considered part of\n this Network. 
If `None`, all Nodes will be considered part of this\n Network.\n\n Raises:\n ValueError: If the layers depend on `Input`s not found in this Model.\n ", + "n_words": 104, + "vocab_size": 67, + "n_whitespaces": 218, + "language": "en" + } + }, + { + "id": 56283, + "commit_id": "5afded9fe6724d9e336f59792ee1d60656a2d94d", + "repo": "prefect", + "path": "tests/cli/test_deployment_preview.py", + "file_name": "test_deployment_preview.py", + "fun_name": "test_previewing_multiple_kubernetes_deployments_from_python", + "commit_message": "Add a CLI command to preview how a FlowRun will appear in any FlowRunner's execution environment (PrefectHQ/orion#1971)\n\nCo-authored-by: Terrence Dorsey \r\nCo-authored-by: Michael Adkins ", + "code": "def test_previewing_multiple_kubernetes_deployments_from_python():\n \n\n result = invoke_and_assert(\n [\n \"deployment\",\n \"preview\",\n \"./tests/deployment_test_files/multiple_kubernetes_deployments.py\",\n ],\n expected_output_contains=\"kind: Job\",\n )\n assert result.stdout.endswith(\"\\n\")\n\n previews = [p.strip() for p in re.split(\"Preview for .+:\", result.stdout) if p]\n assert len(previews) == 4 # there should be 3 K8s and 1 non-K8s in the file\n\n # spot-check a few attributes of the first one\n manifest = yaml.load(previews[0], yaml.SafeLoader)\n assert manifest[\"apiVersion\"] == \"batch/v1\"\n assert manifest[\"kind\"] == \"Job\"\n assert manifest[\"metadata\"][\"generateName\"] == \"cool-name\"\n\n container = manifest[\"spec\"][\"template\"][\"spec\"][\"containers\"][0]\n assert \"PREFECT_TEST_MODE\" in [variable[\"name\"] for variable in container[\"env\"]]\n\n # spot-check a few attributes of the third one, which is customized\n manifest = yaml.load(previews[2], yaml.SafeLoader)\n assert manifest[\"apiVersion\"] == \"batch/v1\"\n assert manifest[\"kind\"] == \"Job\"\n assert manifest[\"metadata\"][\"generateName\"] == \"cool-name\"\n\n container = manifest[\"spec\"][\"template\"][\"spec\"][\"containers\"][0]\n assert \"MY_ENV_VAR\" in [variable[\"name\"] for variable in container[\"env\"]]\n\n", + "url": "https://github.com/PrefectHQ/prefect.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 233, + "n_words": 118, + "vocab_size": 68, + "complexity": 5, + "nloc": 24, + "token_counts": 209, + "n_ast_nodes": 375, + "n_identifiers": 18, + "d_id": 11494, + "documentation": { + "docstring": "`prefect deployment preview my-flow-file.py` should render multiple\n Kubernetes Jobs from a deployment file", + "n_words": 13, + "vocab_size": 12, + "n_whitespaces": 15, + "language": "en" + } + }, + { + "id": 157475, + "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", + "repo": "stablediffusion", + "path": "ldm/modules/ema.py", + "file_name": "ema.py", + "fun_name": "restore", + "commit_message": "release more models", + "code": "def restore(self, parameters):\n \n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)\n", + "url": "https://github.com/Stability-AI/stablediffusion.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 35, + "n_words": 10, + "vocab_size": 9, + "complexity": 2, + "nloc": 3, + "token_counts": 32, + "n_ast_nodes": 51, + "n_identifiers": 9, + "d_id": 36941, + "documentation": { + "docstring": "\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. 
Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n ", + "n_words": 55, + "vocab_size": 36, + "n_whitespaces": 125, + "language": "en" + } + }, + { + "id": 261292, + "commit_id": "e41753ebd57c44ae91b389f190c43ddc0b384a75", + "repo": "scikit-learn", + "path": "sklearn/linear_model/tests/test_least_angle.py", + "file_name": "test_least_angle.py", + "fun_name": "test_lassolarsic_alpha_selection", + "commit_message": "MAINT Clean deprecation for 1.2: normalize in linear models (#24391)", + "code": "def test_lassolarsic_alpha_selection(criterion):\n \n model = make_pipeline(StandardScaler(), LassoLarsIC(criterion=criterion))\n model.fit(X, y)\n\n best_alpha_selected = np.argmin(model[-1].criterion_)\n assert best_alpha_selected == 7\n\n\n@pytest.mark.parametrize(\"fit_intercept\", [True, False])", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "@pytest.mark.parametrize(\"fit_intercept\", [True, False])", + "n_ast_errors": 1, + "ast_levels": 11, + "n_whitespaces": 32, + "n_words": 18, + "vocab_size": 16, + "complexity": 1, + "nloc": 5, + "token_counts": 47, + "n_ast_nodes": 101, + "n_identifiers": 16, + "d_id": 76733, + "documentation": { + "docstring": "Check that we properly compute the AIC and BIC score.\n\n In this test, we reproduce the example of the Fig. 2 of Zou et al.\n (reference [1] in LassoLarsIC) In this example, only 7 features should be\n selected.\n ", + "n_words": 38, + "vocab_size": 32, + "n_whitespaces": 50, + "language": "en" + } + }, + { + "id": 104415, + "commit_id": "e35be138148333078284b942ccc9ed7b1d826f97", + "repo": "datasets", + "path": "src/datasets/table.py", + "file_name": "table.py", + "fun_name": "equals", + "commit_message": "Update docs to new frontend/UI (#3690)\n\n* WIP: update docs to new UI\r\n\r\n* make style\r\n\r\n* Rm unused\r\n\r\n* inject_arrow_table_documentation __annotations__\r\n\r\n* hasattr(arrow_table_method, \"__annotations__\")\r\n\r\n* Update task_template.rst\r\n\r\n* Codeblock PT-TF-SPLIT\r\n\r\n* Convert loading scripts\r\n\r\n* Convert docs to mdx\r\n\r\n* Fix mdx\r\n\r\n* Add \r\n\r\n* Convert mdx tables\r\n\r\n* Fix codeblock\r\n\r\n* Rm unneded hashlinks\r\n\r\n* Update index.mdx\r\n\r\n* Redo dev change\r\n\r\n* Rm circle ci `build_doc` & `deploy_doc`\r\n\r\n* Rm unneeded files\r\n\r\n* Update docs reamde\r\n\r\n* Standardize to `Example::`\r\n\r\n* mdx logging levels doc\r\n\r\n* Table properties inject_arrow_table_documentation\r\n\r\n* ``` to ```py mdx\r\n\r\n* Add Tips mdx\r\n\r\n* important,None -> \r\n\r\n* More misc\r\n\r\n* Center imgs\r\n\r\n* Update instllation page\r\n\r\n* `setup.py` docs section\r\n\r\n* Rm imgs since they are in hf.co\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* Update index mdx\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* just `Dataset` obj\r\n\r\n* Addedversion just italics\r\n\r\n* Update ReadInstruction doc example syntax\r\n\r\n* Change docstring for `prepare_for_task`\r\n\r\n* Chore\r\n\r\n* Remove `code` syntax from headings\r\n\r\n* Rm `code` syntax from headings\r\n\r\n* Hashlink backward compatability\r\n\r\n* S3FileSystem doc\r\n\r\n* S3FileSystem doc updates\r\n\r\n* index.mdx 
updates\r\n\r\n* Add darkmode gifs\r\n\r\n* Index logo img css classes\r\n\r\n* Index mdx dataset logo img size\r\n\r\n* Docs for DownloadMode class\r\n\r\n* Doc DownloadMode table\r\n\r\n* format docstrings\r\n\r\n* style\r\n\r\n* Add doc builder scripts (#3790)\r\n\r\n* add doc builder scripts\r\n\r\n* fix docker image\r\n\r\n* Docs new UI actions no self hosted (#3793)\r\n\r\n* No self hosted\r\n\r\n* replace doc injection by actual docstrings\r\n\r\n* Docstring formatted\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Mishig Davaadorj \r\n\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Mishig Davaadorj \r\n\r\n* Rm notebooks from docs actions since they dont exi\r\n\r\n* Update tsting branch\r\n\r\n* More docstring\r\n\r\n* Chore\r\n\r\n* bump up node version\r\n\r\n* bump up node\r\n\r\n* ``` -> ```py for audio_process.mdx\r\n\r\n* Update .github/workflows/build_documentation.yml\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Uodate dev doc build\r\n\r\n* remove run on PR\r\n\r\n* fix action\r\n\r\n* Fix gh doc workflow\r\n\r\n* forgot this change when merging master\r\n\r\n* Update build doc\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Lysandre Debut ", + "code": "def equals(self, *args, **kwargs):\n \n args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args)\n kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs}\n return self.table.equals(*args, **kwargs)\n", + "url": "https://github.com/huggingface/datasets.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 61, + "n_words": 33, + "vocab_size": 25, + "complexity": 5, + "nloc": 4, + "token_counts": 70, + "n_ast_nodes": 105, + "n_identifiers": 11, + "d_id": 21851, + "documentation": { + "docstring": "\n Check if contents of two tables are equal.\n\n Args:\n other (:class:`datasets.table.Table`):\n Table to compare against.\n check_metadata (:obj:`bool`, defaults to :obj:`False`):\n Whether schema metadata equality should be checked as well.\n\n Returns:\n :obj:`bool`\n ", + "n_words": 31, + "vocab_size": 30, + "n_whitespaces": 123, + "language": "en" + } + }, + { + "id": 21391, + "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", + "repo": "pipenv", + "path": "pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py", + "file_name": "tarfile.py", + "fun_name": "makelink", + "commit_message": "Vendor in pip 22.1.2", + "code": "def makelink(self, tarinfo, targetpath):\n \n try:\n # For systems that support symbolic and hard links.\n if tarinfo.issym():\n os.symlink(tarinfo.linkname, targetpath)\n else:\n # See extract().\n if os.path.exists(tarinfo._link_target):\n os.link(tarinfo._link_target, targetpath)\n else:\n self._extract_member(self._find_link_target(tarinfo),\n targetpath)\n except symlink_exception:\n if tarinfo.issym():\n linkpath = os.path.join(os.path.dirname(tarinfo.name),\n tarinfo.linkname)\n else:\n linkpath = tarinfo.linkname\n else:\n try:\n self._extract_member(self._find_link_target(tarinfo),\n targetpath)\n except KeyError:\n raise ExtractError(\"unable to resolve link inside archive\")\n", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 432, + "n_words": 54, + "vocab_size": 39, + 
"complexity": 6, + "nloc": 22, + "token_counts": 133, + "n_ast_nodes": 219, + "n_identifiers": 21, + "d_id": 3804, + "documentation": { + "docstring": "Make a (symbolic) link called targetpath. If it cannot be created\n (platform limitation), we try to make a copy of the referenced file\n instead of a link.\n ", + "n_words": 27, + "vocab_size": 24, + "n_whitespaces": 52, + "language": "en" + } + }, + { + "id": 221364, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/codecs.py", + "file_name": "codecs.py", + "fun_name": "getincrementaldecoder", + "commit_message": "add python 3.10.4 for windows", + "code": "def getincrementaldecoder(encoding):\n\n \n decoder = lookup(encoding).incrementaldecoder\n if decoder is None:\n raise LookupError(encoding)\n return decoder\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 32, + "n_words": 13, + "vocab_size": 11, + "complexity": 2, + "nloc": 5, + "token_counts": 26, + "n_ast_nodes": 45, + "n_identifiers": 6, + "d_id": 56378, + "documentation": { + "docstring": " Lookup up the codec for the given encoding and return\n its IncrementalDecoder class or factory function.\n\n Raises a LookupError in case the encoding cannot be found\n or the codecs doesn't provide an incremental decoder.\n\n ", + "n_words": 34, + "vocab_size": 29, + "n_whitespaces": 59, + "language": "en" + } + }, + { + "id": 164583, + "commit_id": "ee6b0a09fff7789879c3322edffe9f84d10acee3", + "repo": "pandas", + "path": "pandas/tests/reshape/concat/test_append_common.py", + "file_name": "test_append_common.py", + "fun_name": "_check_expected_dtype", + "commit_message": "ENH: Index[bool] (#45061)", + "code": "def _check_expected_dtype(self, obj, label):\n \n if isinstance(obj, Index):\n assert obj.dtype == label\n elif isinstance(obj, Series):\n if label.startswith(\"period\"):\n assert obj.dtype == \"Period[M]\"\n else:\n assert obj.dtype == label\n else:\n raise ValueError\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 130, + "n_words": 28, + "vocab_size": 18, + "complexity": 4, + "nloc": 10, + "token_counts": 58, + "n_ast_nodes": 96, + "n_identifiers": 10, + "d_id": 39575, + "documentation": { + "docstring": "\n Check whether obj has expected dtype depending on label\n considering not-supported dtypes\n ", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 34, + "language": "en" + } + }, + { + "id": 206991, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "tests/admin_changelist/tests.py", + "file_name": "tests.py", + "fun_name": "test_no_duplicates_for_m2m_in_list_filter", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def test_no_duplicates_for_m2m_in_list_filter(self):\n \n blues = Genre.objects.create(name=\"Blues\")\n band = Band.objects.create(name=\"B.B. 
King Review\", nr_of_members=11)\n\n band.genres.add(blues)\n band.genres.add(blues)\n\n m = BandAdmin(Band, custom_site)\n request = self.factory.get(\"/band/\", data={\"genres\": blues.pk})\n request.user = self.superuser\n\n cl = m.get_changelist_instance(request)\n cl.get_results(request)\n\n # There's only one Group instance\n self.assertEqual(cl.result_count, 1)\n # Queryset must be deletable.\n self.assertIs(cl.queryset.query.distinct, False)\n cl.queryset.delete()\n self.assertEqual(cl.queryset.count(), 0)\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 159, + "n_words": 47, + "vocab_size": 40, + "complexity": 1, + "nloc": 14, + "token_counts": 144, + "n_ast_nodes": 238, + "n_identifiers": 33, + "d_id": 51826, + "documentation": { + "docstring": "\n Regression test for #13902: When using a ManyToMany in list_filter,\n results shouldn't appear more than once. Basic ManyToMany.\n ", + "n_words": 18, + "vocab_size": 18, + "n_whitespaces": 40, + "language": "en" + } + }, + { + "id": 189462, + "commit_id": "902e7eb4f0147b5882a613b67467e38a1d47f01e", + "repo": "manim", + "path": "manim/mobject/svg/svg_mobject.py", + "file_name": "svg_mobject.py", + "fun_name": "handle_transforms", + "commit_message": "Hide more private methods from the docs. (#2468)\n\n* hide privs from text_mobject.py\r\n\r\n* hide privs from tex_mobject.py\r\n\r\n* hide privs from code_mobject.py\r\n\r\n* hide privs from svg_mobject.py\r\n\r\n* remove SVGPath and utils from __init__.py\r\n\r\n* don't import string_to_numbers\r\n\r\n* hide privs from geometry.py\r\n\r\n* hide privs from matrix.py\r\n\r\n* hide privs from numbers.py\r\n\r\n* hide privs from three_dimensions.py\r\n\r\n* forgot underscore under set_stroke_width_from_length\r\n\r\n* there were more i missed\r\n\r\n* unhidea method that was used in docs\r\n\r\n* forgot other text2hash\r\n\r\n* remove svg_path from docs", + "code": "def _handle_transforms(self, element, mobject):\n \n\n if element.hasAttribute(\"x\") and element.hasAttribute(\"y\"):\n x = self._attribute_to_float(element.getAttribute(\"x\"))\n # Flip y\n y = -self._attribute_to_float(element.getAttribute(\"y\"))\n mobject.shift(x * RIGHT + y * UP)\n\n transform_attr_value = element.getAttribute(\"transform\")\n\n # parse the various transforms in the attribute value\n transform_names = [\"matrix\", \"translate\", \"scale\", \"rotate\", \"skewX\", \"skewY\"]\n\n # Borrowed/Inspired from:\n # https://github.com/cjlano/svg/blob/3ea3384457c9780fa7d67837c9c5fd4ebc42cb3b/svg/svg.py#L75\n\n # match any SVG transformation with its parameter (until final parenthesis)\n # [^)]* == anything but a closing parenthesis\n # '|'.join == OR-list of SVG transformations\n transform_regex = \"|\".join([x + r\"[^)]*\\)\" for x in transform_names])\n transforms = re.findall(transform_regex, transform_attr_value)\n\n number_regex = r\"[-+]?(?:\\d+(?:\\.\\d*)?|\\.\\d+)(?:[eE][-+]?\\d+)?\"\n\n for t in transforms:\n op_name, op_args = t.split(\"(\")\n op_name = op_name.strip()\n op_args = [float(x) for x in re.findall(number_regex, op_args)]\n\n if op_name == \"matrix\":\n transform_args = np.array(op_args).reshape([3, 2])\n x = transform_args[2][0]\n y = -transform_args[2][1]\n matrix = np.identity(self.dim)\n matrix[:2, :2] = transform_args[:2, :]\n matrix[1] *= -1\n matrix[:, 1] *= -1\n\n for mob in mobject.family_members_with_points():\n if config[\"renderer\"] == \"opengl\":\n mob.points = 
np.dot(mob.points, matrix)\n else:\n mob.points = np.dot(mob.points, matrix)\n mobject.shift(x * RIGHT + y * UP)\n\n elif op_name == \"scale\":\n scale_values = op_args\n if len(scale_values) == 2:\n scale_x, scale_y = scale_values\n mobject.scale(np.array([scale_x, scale_y, 1]), about_point=ORIGIN)\n elif len(scale_values) == 1:\n scale = scale_values[0]\n mobject.scale(np.array([scale, scale, 1]), about_point=ORIGIN)\n\n elif op_name == \"translate\":\n if len(op_args) == 2:\n x, y = op_args\n else:\n x = op_args\n y = 0\n mobject.shift(x * RIGHT + y * DOWN)\n\n else:\n # TODO: handle rotate, skewX and skewY\n # for now adding a warning message\n logger.warning(\n \"Handling of %s transform is not supported yet!\",\n op_name,\n )\n", + "url": "https://github.com/ManimCommunity/manim.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 18, + "n_whitespaces": 1007, + "n_words": 245, + "vocab_size": 143, + "complexity": 14, + "nloc": 48, + "token_counts": 429, + "n_ast_nodes": 706, + "n_identifiers": 48, + "d_id": 46070, + "documentation": { + "docstring": "Applies the SVG transform to the specified mobject. Transforms include:\n ``matrix``, ``translate``, and ``scale``.\n\n Parameters\n ----------\n element : :class:`minidom.Element`\n The transform command to perform\n\n mobject : :class:`Mobject`\n The Mobject to transform.\n ", + "n_words": 31, + "vocab_size": 25, + "n_whitespaces": 95, + "language": "en" + } + }, + { + "id": 181835, + "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", + "repo": "tpot", + "path": "tpot/base.py", + "file_name": "base.py", + "fun_name": "fit_predict", + "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", + "code": "def fit_predict(self, features, target, sample_weight=None, groups=None):\n \n self.fit(features, target, sample_weight=sample_weight, groups=groups)\n\n return self.predict(features)\n", + "url": "https://github.com/EpistasisLab/tpot.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 33, + "n_words": 12, + "vocab_size": 11, + "complexity": 1, + "nloc": 3, + "token_counts": 41, + "n_ast_nodes": 60, + "n_identifiers": 8, + "d_id": 43611, + "documentation": { + "docstring": "Call fit and predict in sequence.\n\n Parameters\n ----------\n features: array-like {n_samples, n_features}\n Feature matrix\n target: array-like {n_samples}\n List of class labels for prediction\n sample_weight: array-like {n_samples}, optional\n Per-sample weights. 
Higher weights force TPOT to put more emphasis on those points\n groups: array-like, with shape {n_samples, }, optional\n Group labels for the samples used when performing cross-validation.\n This parameter should only be used in conjunction with sklearn's Group cross-validation\n functions, such as sklearn.model_selection.GroupKFold\n\n Returns\n ----------\n array-like: {n_samples}\n Predicted target for the provided features\n\n ", + "n_words": 82, + "vocab_size": 68, + "n_whitespaces": 229, + "language": "en" + } + }, + { + "id": 308619, + "commit_id": "0bcad5579b806284ae0c565bb27ca59ea061b4a1", + "repo": "core", + "path": "tests/components/nest/test_climate_sdm.py", + "file_name": "test_climate_sdm.py", + "fun_name": "test_thermostat_missing_set_point", + "commit_message": "Set nest climate hvac_action to report idle when hvac mode is not off (#62811)", + "code": "async def test_thermostat_missing_set_point(hass):\n \n await setup_climate(\n hass,\n {\n \"sdm.devices.traits.ThermostatHvac\": {\"status\": \"OFF\"},\n \"sdm.devices.traits.ThermostatMode\": {\n \"availableModes\": [\"HEAT\", \"COOL\", \"HEATCOOL\", \"OFF\"],\n \"mode\": \"HEATCOOL\",\n },\n },\n )\n\n assert len(hass.states.async_all()) == 1\n thermostat = hass.states.get(\"climate.my_thermostat\")\n assert thermostat is not None\n assert thermostat.state == HVAC_MODE_HEAT_COOL\n assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE\n assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] is None\n assert set(thermostat.attributes[ATTR_HVAC_MODES]) == {\n HVAC_MODE_HEAT,\n HVAC_MODE_COOL,\n HVAC_MODE_HEAT_COOL,\n HVAC_MODE_OFF,\n }\n assert thermostat.attributes[ATTR_TEMPERATURE] is None\n assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] is None\n assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] is None\n assert ATTR_PRESET_MODE not in thermostat.attributes\n assert ATTR_PRESET_MODES not in thermostat.attributes\n assert ATTR_FAN_MODE not in thermostat.attributes\n assert ATTR_FAN_MODES not in thermostat.attributes\n\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 253, + "n_words": 87, + "vocab_size": 49, + "complexity": 1, + "nloc": 30, + "token_counts": 172, + "n_ast_nodes": 275, + "n_identifiers": 26, + "d_id": 107367, + "documentation": { + "docstring": "Test a thermostat missing many thermostat traits in api response.", + "n_words": 10, + "vocab_size": 9, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 217944, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/imaplib.py", + "file_name": "imaplib.py", + "fun_name": "setacl", + "commit_message": "add python 3.10.4 for windows", + "code": "def setacl(self, mailbox, who, what):\n \n return self._simple_command('SETACL', mailbox, who, what)\n\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 24, + "n_words": 10, + "vocab_size": 8, + "complexity": 1, + "nloc": 2, + "token_counts": 25, + "n_ast_nodes": 38, + "n_identifiers": 6, + "d_id": 55029, + "documentation": { + "docstring": "Set a mailbox acl.\n\n (typ, [data]) = .setacl(mailbox, who, what)\n ", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 24, + "language": "en" + } + }, + { + "id": 42078, + "commit_id": "6460a21555ba6557e1f6f06f4d677d9c19148169", + "repo": "seaborn", + "path": "seaborn/utils.py", + 
"file_name": "utils.py", + "fun_name": "_disable_autolayout", + "commit_message": "Workaround for matplotlib rc_context issue (#2925)\n\n* Workaround for matplotlib rc_context issue\r\n\r\nFixes #2914\r\n\r\n* Add some additional comments about this workaround", + "code": "def _disable_autolayout():\n \n # This is a workaround for an issue in matplotlib, for details see\n # https://github.com/mwaskom/seaborn/issues/2914\n # The only affect of this rcParam is to set the default value for\n # layout= in plt.figure, so we could just do that instead.\n # But then we would need to own the complexity of the transition\n # from tight_layout=True -> layout=\"tight\". This seems easier,\n # but can be removed when (if) that is simpler on the matplotlib side,\n # or if the layout algorithms are improved to handle figure legends.\n orig_val = mpl.rcParams[\"figure.autolayout\"]\n try:\n mpl.rcParams[\"figure.autolayout\"] = False\n yield\n finally:\n mpl.rcParams[\"figure.autolayout\"] = orig_val\n", + "url": "https://github.com/mwaskom/seaborn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 158, + "n_words": 101, + "vocab_size": 74, + "complexity": 2, + "nloc": 7, + "token_counts": 34, + "n_ast_nodes": 73, + "n_identifiers": 4, + "d_id": 7478, + "documentation": { + "docstring": "Context manager for preventing rc-controlled auto-layout behavior.", + "n_words": 7, + "vocab_size": 7, + "n_whitespaces": 6, + "language": "en" + } + }, + { + "id": 101363, + "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", + "repo": "faceswap", + "path": "scripts/convert.py", + "file_name": "convert.py", + "fun_name": "_get_frame_ranges", + "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", + "code": "def _get_frame_ranges(self) -> Optional[List[Tuple[int, int]]]:\n \n if not self._args.frame_ranges:\n logger.debug(\"No frame range set\")\n return None\n\n minframe, maxframe = None, None\n if self._images.is_video:\n minframe, maxframe = 1, self._images.count\n else:\n indices = [int(self._imageidxre.findall(os.path.basename(filename))[0])\n for filename in self._images.file_list]\n if indices:\n minframe, maxframe = min(indices), max(indices)\n logger.debug(\"minframe: %s, maxframe: %s\", minframe, maxframe)\n\n if minframe is None or maxframe is None:\n raise FaceswapError(\"Frame Ranges specified, but could not determine frame numbering \"\n \"from filenames\")\n\n retval = []\n for rng in self._args.frame_ranges:\n if \"-\" not in rng:\n raise FaceswapError(\"Frame Ranges not specified in the correct format\")\n start, end = rng.split(\"-\")\n retval.append((max(int(start), minframe), min(int(end), maxframe)))\n logger.debug(\"frame ranges: %s\", retval)\n return retval\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 18, + "n_whitespaces": 362, + "n_words": 103, + "vocab_size": 70, + "complexity": 9, + "nloc": 34, + "token_counts": 206, + "n_ast_nodes": 336, + "n_identifiers": 32, + "d_id": 20778, + "documentation": { + "docstring": " Obtain the frame ranges that are to be converted.\n\n If frame ranges have been specified, then split the command line formatted arguments into\n ranges that can be used.\n\n Returns\n list or ``None``\n A list of frames to be processed, or ``None`` if the command 
line argument was not\n used\n ", + "n_words": 49, + "vocab_size": 35, + "n_whitespaces": 108, + "language": "en" + } + }, + { + "id": 32334, + "commit_id": "99eb9b523f9b9ea6096323ce5610ce6633acc88a", + "repo": "transformers", + "path": "examples/pytorch/test_accelerate_examples.py", + "file_name": "test_accelerate_examples.py", + "fun_name": "test_run_translation_no_trainer", + "commit_message": "Fix `no_trainer` CI (#18242)\n\n* Fix all tests", + "code": "def test_run_translation_no_trainer(self):\n tmp_dir = self.get_auto_remove_tmp_dir()\n testargs = f.split()\n\n run_command(self._launch_args + testargs)\n result = get_results(tmp_dir)\n self.assertGreaterEqual(result[\"eval_bleu\"], 30)\n self.assertTrue(os.path.exists(os.path.join(tmp_dir, \"epoch_0\")))\n self.assertTrue(os.path.exists(os.path.join(tmp_dir, \"translation_no_trainer\")))\n", + "url": "https://github.com/huggingface/transformers.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 68, + "n_words": 20, + "vocab_size": 17, + "complexity": 1, + "nloc": 25, + "token_counts": 89, + "n_ast_nodes": 159, + "n_identifiers": 17, + "d_id": 5908, + "documentation": { + "docstring": "\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ", + "n_words": 25, + "vocab_size": 22, + "n_whitespaces": 209, + "language": "en" + } + }, + { + "id": 322992, + "commit_id": "93cae49c0c572b5c1ac972759140fbe924b0374d", + "repo": "PaddleNLP", + "path": "examples/model_interpretation/task/similarity/simnet/utils.py", + "file_name": "utils.py", + "fun_name": "convert_example", + "commit_message": "Add NLP model interpretation (#1752)\n\n* upload NLP interpretation\r\n\r\n* fix problems and relocate project\r\n\r\n* remove abandoned picture\r\n\r\n* remove abandoned picture\r\n\r\n* fix dead link in README\r\n\r\n* fix dead link in README\r\n\r\n* fix code style problems\r\n\r\n* fix CR round 1\r\n\r\n* remove .gitkeep files\r\n\r\n* fix code style\r\n\r\n* fix file encoding problem\r\n\r\n* fix code style\r\n\r\n* delete duplicated files due to directory rebuild\r\n\r\n* fix CR round 2\r\n\r\n* fix code style\r\n\r\n* fix ernie tokenizer\r\n\r\n* fix code style\r\n\r\n* fix problem from CR round 1\r\n\r\n* fix bugs\r\n\r\n* fix README\r\n\r\n* remove duplicated files\r\n\r\n* deal with diff of old and new tokenizer results\r\n\r\n* fix CR round 4\r\n\r\n* fix code style\r\n\r\n* add missing dependence\r\n\r\n* fix broken import path\r\n\r\n* move some data file to cloud\r\n\r\n* MRC upper case to lower case\r\n\r\nCo-authored-by: Zeyu Chen \r\nCo-authored-by: binlinquge \r\nCo-authored-by: Guo Sheng ", + "code": "def convert_example(example, tokenizer, is_test=False, language='en'):\n \n if language == 'ch':\n q_name = \"query\"\n t_name = \"title\"\n label = \"label\"\n else:\n q_name = \"sentence1\"\n t_name = \"sentence2\"\n label = \"labels\"\n\n query, title = example[q_name], example[t_name]\n query_ids = np.array(tokenizer.encode(query), dtype=\"int64\")\n query_seq_len = 
np.array(len(query_ids), dtype=\"int64\")\n title_ids = np.array(tokenizer.encode(title), dtype=\"int64\")\n title_seq_len = np.array(len(title_ids), dtype=\"int64\")\n result = [query_ids, title_ids, query_seq_len, title_seq_len]\n if not is_test:\n label = np.array(example[label], dtype=\"int64\")\n result.append(label)\n return result\n\n", + "url": "https://github.com/PaddlePaddle/PaddleNLP.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 154, + "n_words": 65, + "vocab_size": 43, + "complexity": 3, + "nloc": 19, + "token_counts": 156, + "n_ast_nodes": 266, + "n_identifiers": 21, + "d_id": 118323, + "documentation": { + "docstring": "\n Builds model inputs from a sequence for sequence classification tasks. \n It use `jieba.cut` to tokenize text.\n\n Args:\n example(obj:`list[str]`): List of input data, containing text and label if it have label.\n tokenizer(obj: paddlenlp.data.JiebaTokenizer): It use jieba to cut the chinese string.\n is_test(obj:`False`, defaults to `False`): Whether the example contains label or not.\n\n Returns:\n query_ids(obj:`list[int]`): The list of query ids.\n title_ids(obj:`list[int]`): The list of title ids.\n query_seq_len(obj:`int`): The input sequence query length.\n title_seq_len(obj:`int`): The input sequence title length.\n label(obj:`numpy.array`, data type of int64, optional): The input label if not is_test.\n ", + "n_words": 88, + "vocab_size": 62, + "n_whitespaces": 161, + "language": "en" + } + }, + { + "id": 268291, + "commit_id": "42d8a9daa89907545ebd208f4fd0a9192738c6a6", + "repo": "ansible", + "path": "lib/ansible/plugins/strategy/__init__.py", + "file_name": "__init__.py", + "fun_name": "_load_included_file", + "commit_message": "Prevent double failing hosts for includes in loops (#76928)\n\nFixes #23161", + "code": "def _load_included_file(self, included_file, iterator, is_handler=False):\n \n display.debug(\"loading included file: %s\" % included_file._filename)\n try:\n data = self._loader.load_from_file(included_file._filename)\n if data is None:\n return []\n elif not isinstance(data, list):\n raise AnsibleError(\"included task files must contain a list of tasks\")\n\n ti_copy = self._copy_included_file(included_file)\n\n block_list = load_list_of_blocks(\n data,\n play=iterator._play,\n parent_block=ti_copy.build_parent_block(),\n role=included_file._task._role,\n use_handlers=is_handler,\n loader=self._loader,\n variable_manager=self._variable_manager,\n )\n\n # since we skip incrementing the stats when the task result is\n # first processed, we do so now for each host in the list\n for host in included_file._hosts:\n self._tqm._stats.increment('ok', host.name)\n except AnsibleParserError:\n raise\n except AnsibleError as e:\n if isinstance(e, AnsibleFileNotFound):\n reason = \"Could not find or access '%s' on the Ansible Controller.\" % to_text(e.file_name)\n else:\n reason = to_text(e)\n\n for r in included_file._results:\n r._result['failed'] = True\n\n for host in included_file._hosts:\n tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=reason))\n self._tqm._stats.increment('failures', host.name)\n self._tqm.send_callback('v2_runner_on_failed', tr)\n raise AnsibleError(reason) from e\n\n # finally, send the callback and return the list of blocks loaded\n self._tqm.send_callback('v2_playbook_on_include', included_file)\n display.debug(\"done processing included file\")\n return block_list\n", + "url": 
"https://github.com/ansible/ansible.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 621, + "n_words": 153, + "vocab_size": 112, + "complexity": 9, + "nloc": 37, + "token_counts": 267, + "n_ast_nodes": 427, + "n_identifiers": 51, + "d_id": 79472, + "documentation": { + "docstring": "\n Loads an included YAML file of tasks, applying the optional set of variables.\n\n Raises AnsibleError exception in case of a failure during including a file,\n in such case the caller is responsible for marking the host(s) as failed\n using PlayIterator.mark_host_failed().\n ", + "n_words": 40, + "vocab_size": 33, + "n_whitespaces": 76, + "language": "en" + } + }, + { + "id": 207677, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "tests/admin_views/tests.py", + "file_name": "tests.py", + "fun_name": "test_named_group_field_choices_change_list", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def test_named_group_field_choices_change_list(self):\n \n link1 = reverse(\"admin:admin_views_fabric_change\", args=(self.fab1.pk,))\n link2 = reverse(\"admin:admin_views_fabric_change\", args=(self.fab2.pk,))\n response = self.client.get(reverse(\"admin:admin_views_fabric_changelist\"))\n fail_msg = (\n \"Changelist table isn't showing the right human-readable values \"\n \"set by a model field 'choices' option named group.\"\n )\n self.assertContains(\n response,\n 'Horizontal' % link1,\n msg_prefix=fail_msg,\n html=True,\n )\n self.assertContains(\n response,\n 'Vertical' % link2,\n msg_prefix=fail_msg,\n html=True,\n )\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 233, + "n_words": 53, + "vocab_size": 41, + "complexity": 1, + "nloc": 20, + "token_counts": 97, + "n_ast_nodes": 160, + "n_identifiers": 16, + "d_id": 52047, + "documentation": { + "docstring": "\n Ensures the admin changelist shows correct values in the relevant column\n for rows corresponding to instances of a model in which a named group\n has been used in the choices option of a field.\n ", + "n_words": 34, + "vocab_size": 27, + "n_whitespaces": 63, + "language": "en" + } + }, + { + "id": 48735, + "commit_id": "48a21aa0eb3a95d32456c2a927eff9552a04231e", + "repo": "django-rest-framework", + "path": "tests/test_routers.py", + "file_name": "test_routers.py", + "fun_name": "test_nonconflicting_mixed_basenames", + "commit_message": "raise ImproperlyConfigured exception if `basename` is not unique (#8438)\n\n* raise ImproperlyConfigured if basename already exists\r\n\r\n* rename already_registered function; return True/False\r\n\r\n* additional basename tests\r\n\r\n* additional basename tests\r\n\r\n* Update rest_framework/routers.py\r\n\r\nCo-authored-by: David Graves \r\nCo-authored-by: Asif Saif Uddin ", + "code": "def test_nonconflicting_mixed_basenames(self):\n \n self.router.register(r'notes', NoteViewSet)\n self.router.register(r'notes_kwduplicate', KWargedNoteViewSet, basename='routertestmodel_kwduplicate')\n self.router.register(r'notes_duplicate', NoteViewSet, basename='routertestmodel_duplicate')\n", + "url": "https://github.com/encode/django-rest-framework.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 38, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 4, + "token_counts": 47, + "n_ast_nodes": 78, + "n_identifiers": 7, + 
"d_id": 9586, + "documentation": { + "docstring": "\n Ensure 2 routers with the same model, and a distinct basename\n specified on the second router does not fail\n ", + "n_words": 19, + "vocab_size": 18, + "n_whitespaces": 41, + "language": "en" + } + }, + { + "id": 60859, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_internal/models/wheel.py", + "file_name": "wheel.py", + "fun_name": "find_most_preferred_tag", + "commit_message": "upd; format", + "code": "def find_most_preferred_tag(self, tags, tag_to_priority):\n # type: (List[Tag], Dict[Tag, int]) -> int\n \n return min(\n tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority\n )\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 62, + "n_words": 23, + "vocab_size": 21, + "complexity": 3, + "nloc": 4, + "token_counts": 28, + "n_ast_nodes": 44, + "n_identifiers": 7, + "d_id": 12306, + "documentation": { + "docstring": "Return the priority of the most preferred tag that one of the wheel's file\n tag combinations acheives in the given list of supported tags using the given\n tag_to_priority mapping, where lower priorities are more-preferred.\n\n This is used in place of support_index_min in some cases in order to avoid\n an expensive linear scan of a large list of tags.\n\n :param tags: the PEP 425 tags to check the wheel against.\n :param tag_to_priority: a mapping from tag to priority of that tag, where\n lower is more preferred.\n\n :raises ValueError: If none of the wheel's file tags match one of\n the supported tags.\n ", + "n_words": 100, + "vocab_size": 61, + "n_whitespaces": 178, + "language": "en" + } + }, + { + "id": 268984, + "commit_id": "01c906c4178db5ae03b7eb2d298a052c952a0667", + "repo": "keras", + "path": "keras/saving/saved_model/load.py", + "file_name": "load.py", + "fun_name": "_finalize_config_layers", + "commit_message": "Reorganize RNN layers, cells and wrappers into smaller logically organized files hosted under an `rnn` directory.\n\nPiperOrigin-RevId: 428841673", + "code": "def _finalize_config_layers(layers):\n \n for layer in layers:\n # It is assumed that layers define their unconditional losses after being\n # recreated from the config and built. The exceptions to this\n # are Functional and Sequential models, which only store conditional losses\n # (losses dependent on the inputs) in the config. Unconditional losses like\n # weight regularization must be revived from the SavedModel.\n if _is_graph_network(layer):\n _restore_layer_unconditional_losses(layer)\n\n # Some layers, like Dense, record their activation loss function in the\n # config. 
However, not all layers do this, so the activation loss may be\n # missing when restored from the config/hdf5.\n # TODO(kathywu): Investigate ways to improve the config to ensure consistent\n # loading behavior between HDF5 and SavedModel.\n _restore_layer_activation_loss(layer)\n\n # Restore metrics list.\n _restore_layer_metrics(layer)\n\n # Restore RNN layer states.\n if (isinstance(layer, base_rnn.RNN) and\n layer.stateful and\n hasattr(_get_keras_attr(layer), 'states')):\n layer.states = getattr(_get_keras_attr(layer), 'states', None)\n for variable in tf.nest.flatten(layer.states):\n backend.track_variable(variable)\n\n # Perform any layer defined finalization of the layer state.\n layer.finalize_state()\n\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 247, + "n_words": 155, + "vocab_size": 107, + "complexity": 7, + "nloc": 13, + "token_counts": 95, + "n_ast_nodes": 169, + "n_identifiers": 22, + "d_id": 79804, + "documentation": { + "docstring": "Runs the final steps of loading Keras Layers from config.", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 176447, + "commit_id": "cc1db275efc709cb964ce88abbfa877798d58c10", + "repo": "networkx", + "path": "networkx/algorithms/approximation/connectivity.py", + "file_name": "connectivity.py", + "fun_name": "local_node_connectivity", + "commit_message": "Minor improvements from general code readthrough (#5414)\n\n* Add deprecated directive to reversed docstring.\r\n\r\n* Add missing dep directives to shpfiles.\r\n\r\n* Remove defn of INF sentinel.\r\n\r\n* typo.\r\n\r\n* str -> comment in forloop.\r\n\r\n* STY: appropriate casing for var name.", + "code": "def local_node_connectivity(G, source, target, cutoff=None):\n \n if target == source:\n raise nx.NetworkXError(\"source and target have to be different nodes.\")\n\n # Maximum possible node independent paths\n if G.is_directed():\n possible = min(G.out_degree(source), G.in_degree(target))\n else:\n possible = min(G.degree(source), G.degree(target))\n\n K = 0\n if not possible:\n return K\n\n if cutoff is None:\n cutoff = float(\"inf\")\n\n exclude = set()\n for i in range(min(possible, cutoff)):\n try:\n path = _bidirectional_shortest_path(G, source, target, exclude)\n exclude.update(set(path))\n K += 1\n except nx.NetworkXNoPath:\n break\n\n return K\n\n", + "url": "https://github.com/networkx/networkx.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 200, + "n_words": 74, + "vocab_size": 56, + "complexity": 7, + "nloc": 21, + "token_counts": 143, + "n_ast_nodes": 232, + "n_identifiers": 23, + "d_id": 41908, + "documentation": { + "docstring": "Compute node connectivity between source and target.\n\n Pairwise or local node connectivity between two distinct and nonadjacent\n nodes is the minimum number of nodes that must be removed (minimum\n separating cutset) to disconnect them. By Menger's theorem, this is equal\n to the number of node independent paths (paths that share no nodes other\n than source and target). 
Which is what we compute in this function.\n\n This algorithm is a fast approximation that gives an strict lower\n bound on the actual number of node independent paths between two nodes [1]_.\n It works for both directed and undirected graphs.\n\n Parameters\n ----------\n\n G : NetworkX graph\n\n source : node\n Starting node for node connectivity\n\n target : node\n Ending node for node connectivity\n\n cutoff : integer\n Maximum node connectivity to consider. If None, the minimum degree\n of source or target is used as a cutoff. Default value None.\n\n Returns\n -------\n k: integer\n pairwise node connectivity\n\n Examples\n --------\n >>> # Platonic octahedral graph has node connectivity 4\n >>> # for each non adjacent node pair\n >>> from networkx.algorithms import approximation as approx\n >>> G = nx.octahedral_graph()\n >>> approx.local_node_connectivity(G, 0, 5)\n 4\n\n Notes\n -----\n This algorithm [1]_ finds node independents paths between two nodes by\n computing their shortest path using BFS, marking the nodes of the path\n found as 'used' and then searching other shortest paths excluding the\n nodes marked as used until no more paths exist. It is not exact because\n a shortest path could use nodes that, if the path were longer, may belong\n to two different node independent paths. Thus it only guarantees an\n strict lower bound on node connectivity.\n\n Note that the authors propose a further refinement, losing accuracy and\n gaining speed, which is not implemented yet.\n\n See also\n --------\n all_pairs_node_connectivity\n node_connectivity\n\n References\n ----------\n .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for\n Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035\n http://eclectic.ss.uci.edu/~drwhite/working.pdf\n\n ", + "n_words": 314, + "vocab_size": 192, + "n_whitespaces": 494, + "language": "en" + } + }, + { + "id": 212886, + "commit_id": "935e430420f5ac18df67233040ba86359d98a579", + "repo": "PySimpleGUI", + "path": "PySimpleGUI.py", + "file_name": "PySimpleGUI.py", + "fun_name": "easy_print", + "commit_message": "Addition of blocking parameter to debug printing. 
IF True, then execution of your code is stopped until the \"Quit\" button / \"X\" is clicked on the Debug Window.", + "code": "def easy_print(*args, size=(None, None), end=None, sep=None, location=(None, None), relative_location=(None, None), font=None, no_titlebar=False,\n no_button=False, grab_anywhere=False, keep_on_top=None, do_not_reroute_stdout=True, echo_stdout=False, text_color=None, background_color=None, colors=None, c=None,\n erase_all=False, resizable=True, blocking=None):\n \n if _DebugWin.debug_window is None:\n _DebugWin.debug_window = _DebugWin(size=size, location=location, relative_location=relative_location, font=font, no_titlebar=no_titlebar,\n no_button=no_button, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top,\n do_not_reroute_stdout=do_not_reroute_stdout, echo_stdout=echo_stdout, resizable=resizable, blocking=blocking)\n txt_color, bg_color = _parse_colors_parm(c or colors)\n _DebugWin.debug_window.Print(*args, end=end, sep=sep, text_color=text_color or txt_color, background_color=background_color or bg_color,\n erase_all=erase_all, font=font, blocking=blocking)\n\n", + "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 223, + "n_words": 60, + "vocab_size": 51, + "complexity": 1, + "nloc": 3, + "token_counts": 94, + "n_ast_nodes": 279, + "n_identifiers": 27, + "d_id": 53492, + "documentation": { + "docstring": "\n Works like a \"print\" statement but with windowing options. Routes output to the \"Debug Window\"\n\n In addition to the normal text and background colors, you can use a \"colors\" tuple/string\n The \"colors\" or \"c\" parameter defines both the text and background in a single parm.\n It can be a tuple or a single single. Both text and background colors need to be specified\n colors -(str, str) or str. A combined text/background color definition in a single parameter\n c - (str, str) - Colors tuple has format (foreground, backgrouned)\n c - str - can also be a string of the format \"foreground on background\" (\"white on red\")\n\n :param *args: stuff to output\n :type *args: (Any)\n :param size: (w,h) w=characters-wide, h=rows-high\n :type size: (int, int)\n :param end: end character\n :type end: (str)\n :param sep: separator character\n :type sep: (str)\n :param location: Location of upper left corner of the window\n :type location: (int, int)\n :param relative_location: (x,y) location relative to the default location of the window, in pixels. Normally the window centers. This location is relative to the location the window would be created. Note they can be negative.\n :type relative_location: (int, int)\n :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. 
Styles: italic * roman bold normal underline overstrike\n :type font: (str or (str, int[, str]) or None)\n :param no_titlebar: If True no titlebar will be shown\n :type no_titlebar: (bool)\n :param no_button: don't show button\n :type no_button: (bool)\n :param grab_anywhere: If True: can grab anywhere to move the window (Default = False)\n :type grab_anywhere: (bool)\n :param background_color: color of background\n :type background_color: (str)\n :param text_color: color of the text\n :type text_color: (str)\n :param keep_on_top: If True the window will remain above all current windows\n :type keep_on_top: (bool)\n :param location: Location of upper left corner of the window\n :type location: (int, int)\n :param do_not_reroute_stdout: do not reroute stdout and stderr. If False, both stdout and stderr will reroute to here\n :type do_not_reroute_stdout: (bool)\n :param echo_stdout: If True stdout is sent to both the console and the debug window\n :type echo_stdout: (bool)\n :param colors: Either a tuple or a string that has both the text and background colors\n :type colors: (str) or (str, str)\n :param c: Either a tuple or a string that has both the text and background colors\n :type c: (str) or (str, str)\n :param resizable: if True, the user can resize the debug window. Default is True\n :type resizable: (bool)\n :param erase_all: If True when erase the output before printing\n :type erase_all: (bool)\n :param blocking: if True, makes the window block instead of returning immediately. The \"Quit\" button changers to \"More\"\n :type blocking: (bool | None)\n :return:\n :rtype:\n ", + "n_words": 444, + "vocab_size": 200, + "n_whitespaces": 1135, + "language": "en" + } + }, + { + "id": 264108, + "commit_id": "f57e15ae14d2370cba7a14cfae97d2c29b5c8154", + "repo": "pyinstaller", + "path": "PyInstaller/utils/win32/versioninfo.py", + "file_name": "versioninfo.py", + "fun_name": "load_version_info_from_text_file", + "commit_message": "building: EXE: load version info structure before comparing guts\n\nLoad the version information structure in `EXE` constructor, so that\nthe comparison against cached state is done with the structure instead\nof the filen name. This way, changing the contents of the version\ninformation file triggers rebuild of the EXE.\n\nSplit and clean-up related functions in the `utils.win32.versioninfo`\nmodule as well as in `pyi-grab_version` and `pyi-set_version`\nutility scripts.", + "code": "def load_version_info_from_text_file(filename):\n \n\n # Read and parse the version file. It may have a byte order marker or encoding cookie - respect it if it does.\n import PyInstaller.utils.misc as miscutils\n with open(filename, 'rb') as fp:\n text = miscutils.decode(fp.read())\n\n # Deserialize via eval()\n try:\n info = eval(text)\n except Exception as e:\n raise ValueError(\"Failed to deserialize VSVersionInfo from text-based representation!\") from e\n\n # Sanity check\n assert isinstance(info, VSVersionInfo), \\\n f\"Loaded incompatible structure type! 
Expected VSVersionInfo, got: {type(info)!r}\"\n\n return info\n\n", + "url": "https://github.com/pyinstaller/pyinstaller.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 134, + "n_words": 76, + "vocab_size": 68, + "complexity": 2, + "nloc": 11, + "token_counts": 69, + "n_ast_nodes": 129, + "n_identifiers": 19, + "d_id": 77609, + "documentation": { + "docstring": "\n Load the `VSVersionInfo` structure from its string-based (`VSVersionInfo.__str__`) serialization by reading the\n text from the file and running it through `eval()`.\n ", + "n_words": 21, + "vocab_size": 18, + "n_whitespaces": 31, + "language": "en" + } + }, + { + "id": 250580, + "commit_id": "ede269fce40ec4000a4717d5f5aec7835d9931c2", + "repo": "mitmproxy", + "path": "mitmproxy/flow.py", + "file_name": "flow.py", + "fun_name": "intercept", + "commit_message": "Flow.intercept: use an Event instead of the reply system\n\nThis is patch 3/4 of the reply-ectomy.", + "code": "def intercept(self):\n \n if self.intercepted:\n return\n self.intercepted = True\n if self._resume_event is not None:\n self._resume_event.clear()\n", + "url": "https://github.com/mitmproxy/mitmproxy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 64, + "n_words": 14, + "vocab_size": 13, + "complexity": 3, + "nloc": 6, + "token_counts": 32, + "n_ast_nodes": 55, + "n_identifiers": 5, + "d_id": 73511, + "documentation": { + "docstring": "\n Intercept this Flow. Processing will stop until resume is\n called.\n ", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 32, + "language": "en" + } + }, + { + "id": 145520, + "commit_id": "6f68c74a5dbfbb936cb675781acb2a36eae10984", + "repo": "ray", + "path": "python/ray/_private/resource_spec.py", + "file_name": "resource_spec.py", + "fun_name": "_autodetect_num_gpus", + "commit_message": "Use GPUtil for gpu detection when available (#18938)\n\nIn Envs with K8S and enabled SELinux there is a bug:\r\n\"/proc/nvidia/\" is not allowed to mount in container\r\nSo, i made a rework for GPU detection based on GPutil package.\r\n\r\n\r\n\r\n## Checks\r\n\r\n- [x] I've run `scripts/format.sh` to lint the changes in this PR.\r\n- [x] I've made sure the tests are passing. 
Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/\r\n- Testing Strategy\r\n - [x] Release tests\r\n\r\nCo-authored-by: Mopga \r\nCo-authored-by: Julius ", + "code": "def _autodetect_num_gpus():\n \n result = 0\n if sys.platform.startswith(\"linux\"):\n if importlib.util.find_spec(\"GPUtil\"):\n gpu_list = GPUtil.getGPUs()\n result = len(gpu_list)\n else:\n proc_gpus_path = \"/proc/driver/nvidia/gpus\"\n if os.path.isdir(proc_gpus_path):\n result = len(os.listdir(proc_gpus_path))\n elif sys.platform == \"win32\":\n props = \"AdapterCompatibility\"\n cmdargs = [\"WMIC\", \"PATH\", \"Win32_VideoController\", \"GET\", props]\n lines = subprocess.check_output(cmdargs).splitlines()[1:]\n result = len([x.rstrip() for x in lines if x.startswith(b\"NVIDIA\")])\n return result\n\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 168, + "n_words": 52, + "vocab_size": 36, + "complexity": 7, + "nloc": 16, + "token_counts": 132, + "n_ast_nodes": 231, + "n_identifiers": 25, + "d_id": 33474, + "documentation": { + "docstring": "Attempt to detect the number of GPUs on this machine.\n\n TODO(rkn): This currently assumes NVIDIA GPUs on Linux.\n TODO(mehrdadn): Use a better mechanism for Windows.\n\n Returns:\n The number of GPUs if any were detected, otherwise 0.\n ", + "n_words": 36, + "vocab_size": 31, + "n_whitespaces": 55, + "language": "en" + } + }, + { + "id": 64391, + "commit_id": "0ca58d762715fd10c751c4497f3037908f4dfb20", + "repo": "erpnext", + "path": "erpnext/patches/v14_0/set_work_order_qty_in_so_from_mr.py", + "file_name": "set_work_order_qty_in_so_from_mr.py", + "fun_name": "execute", + "commit_message": "chore: Patch to update SO work_order_qty and Linter fix", + "code": "def execute():\n \n work_order = frappe.qb.DocType(\"Work Order\")\n query = (\n frappe.qb.from_(work_order)\n .select(\n work_order.name, work_order.produced_qty,\n work_order.material_request,\n work_order.material_request_item,\n work_order.sales_order\n ).where(\n (work_order.material_request.isnotnull())\n & (work_order.material_request_item.isnotnull())\n & (work_order.sales_order.isnotnull())\n & (work_order.docstatus == 1)\n & (work_order.produced_qty > 0)\n )\n )\n results = query.run(as_dict=True)\n\n for row in results:\n so_item = frappe.get_value(\n \"Material Request Item\", row.material_request_item, \"sales_order_item\"\n )\n frappe.db.set_value(\"Work Order\", row.name, \"sales_order_item\", so_item)\n\n if so_item:\n wo = frappe.get_doc(\"Work Order\", row.name)\n wo.update_work_order_qty_in_so()\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 267, + "n_words": 61, + "vocab_size": 51, + "complexity": 3, + "nloc": 26, + "token_counts": 164, + "n_ast_nodes": 264, + "n_identifiers": 27, + "d_id": 13623, + "documentation": { + "docstring": "\n 1. Get submitted Work Orders with MR, MR Item and SO set\n 2. Get SO Item detail from MR Item detail in WO, and set in WO\n 3. 
Update work_order_qty in SO\n ", + "n_words": 32, + "vocab_size": 21, + "n_whitespaces": 45, + "language": "en" + } + }, + { + "id": 110170, + "commit_id": "8ef4e017f8a95db8704728a5fffd2c0384afc525", + "repo": "matplotlib", + "path": "lib/matplotlib/offsetbox.py", + "file_name": "offsetbox.py", + "fun_name": "_get_packed_offsets", + "commit_message": "Don't pass unused xdescent to _get_packed_offsets.\n\nInstead of passing a list of (widths, xdescents) where xdescent is\nunused, just pass a list of widths. This helper is private so we just\nneed to adjust the call sites and tests with no deprecation.\n\nThis patch is preliminary work for some further cleanup on the offsetbox\nmodule.", + "code": "def _get_packed_offsets(widths, total, sep, mode=\"fixed\"):\n r\n _api.check_in_list([\"fixed\", \"expand\", \"equal\"], mode=mode)\n\n if mode == \"fixed\":\n offsets_ = np.cumsum([0] + [w + sep for w in widths])\n offsets = offsets_[:-1]\n if total is None:\n total = offsets_[-1] - sep\n return total, offsets\n\n elif mode == \"expand\":\n # This is a bit of a hack to avoid a TypeError when *total*\n # is None and used in conjugation with tight layout.\n if total is None:\n total = 1\n if len(widths) > 1:\n sep = (total - sum(widths)) / (len(widths) - 1)\n else:\n sep = 0\n offsets_ = np.cumsum([0] + [w + sep for w in widths])\n offsets = offsets_[:-1]\n return total, offsets\n\n elif mode == \"equal\":\n maxh = max(widths)\n if total is None:\n if sep is None:\n raise ValueError(\"total and sep cannot both be None when \"\n \"using layout mode 'equal'\")\n total = (maxh + sep) * len(widths)\n else:\n sep = total / len(widths) - maxh\n offsets = (maxh + sep) * np.arange(len(widths))\n return total, offsets\n\n", + "url": "https://github.com/matplotlib/matplotlib.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 420, + "n_words": 163, + "vocab_size": 79, + "complexity": 11, + "nloc": 74, + "token_counts": 231, + "n_ast_nodes": 381, + "n_identifiers": 18, + "d_id": 23960, + "documentation": { + "docstring": "\n Pack boxes specified by their *widths*.\n\n For simplicity of the description, the terminology used here assumes a\n horizontal layout, but the function works equally for a vertical layout.\n\n There are three packing *mode*\\s:\n\n - 'fixed': The elements are packed tight to the left with a spacing of\n *sep* in between. If *total* is *None* the returned total will be the\n right edge of the last box. A non-*None* total will be passed unchecked\n to the output. In particular this means that right edge of the last\n box may be further to the right than the returned total.\n\n - 'expand': Distribute the boxes with equal spacing so that the left edge\n of the first box is at 0, and the right edge of the last box is at\n *total*. The parameter *sep* is ignored in this mode. A total of *None*\n is accepted and considered equal to 1. The total is returned unchanged\n (except for the conversion *None* to 1). If the total is smaller than\n the sum of the widths, the laid out boxes will overlap.\n\n - 'equal': If *total* is given, the total space is divided in N equal\n ranges and each box is left-aligned within its subspace.\n Otherwise (*total* is *None*), *sep* must be provided and each box is\n left-aligned in its subspace of width ``(max(widths) + sep)``. 
The\n total width is then calculated to be ``N * (max(widths) + sep)``.\n\n Parameters\n ----------\n widths : list of float\n Widths of boxes to be packed.\n total : float or None\n Intended total length. *None* if not used.\n sep : float\n Spacing between boxes.\n mode : {'fixed', 'expand', 'equal'}\n The packing mode.\n\n Returns\n -------\n total : float\n The total width needed to accommodate the laid out boxes.\n offsets : array of float\n The left offsets of the boxes.\n ", + "n_words": 298, + "vocab_size": 150, + "n_whitespaces": 460, + "language": "en" + } + }, + { + "id": 278305, + "commit_id": "b0ffc0031e9c1964e7398ca47c6666bbfc0d5086", + "repo": "keras", + "path": "keras/saving/saved_model/load.py", + "file_name": "load.py", + "fun_name": "del_tracking", + "commit_message": "resolve line-too-long in saving", + "code": "def del_tracking(self):\n \n # Now that the node object has been fully loaded, and the checkpoint has\n # been restored, the object no longer needs to track objects added from\n # SerializedAttributes. (Note that saving a training checkpoint still\n # functions correctly, because layers and variables are tracked\n # separately by the Layer object.)\n # TODO(kathywu): Instead of outright deleting these nodes (which would\n # make restoring from a different checkpoint tricky), mark them as extra\n # dependencies that are OK to overwrite.\n for node in self.loaded_nodes.values():\n node = node[0]\n if not isinstance(node, base_layer.Layer):\n # Loaded nodes can contain other trackable objects created when\n # loading layers from the config, such as variables.\n continue\n for name in PUBLIC_ATTRIBUTES:\n node._delete_tracking(name) # pylint: disable=protected-access\n\n if isinstance(node, functional_lib.Functional):\n # Delete the temporary layer dependencies, which were used to\n # restore the checkpointed values. 
When the model is live, the\n # user can delete or add layers to the model at any time, so\n # these layer dependencies may be obsolete.\n dependencies = list(\n node._self_unconditional_dependency_names\n ) # pylint: disable=protected-access\n for name in dependencies:\n if (\n re.match(r\"^layer(_with_weights)?-[\\d+]\", name)\n is not None\n ):\n node._delete_tracking(\n name\n ) # pylint: disable=protected-access\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 650, + "n_words": 192, + "vocab_size": 122, + "complexity": 7, + "nloc": 19, + "token_counts": 91, + "n_ast_nodes": 161, + "n_identifiers": 18, + "d_id": 82463, + "documentation": { + "docstring": "Removes tracked references that are only used when loading the\n model.", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 17, + "language": "en" + } + }, + { + "id": 166950, + "commit_id": "89be1f053b695c4ce1c0569f737caf3f03c12128", + "repo": "pandas", + "path": "pandas/tests/arrays/floating/conftest.py", + "file_name": "conftest.py", + "fun_name": "all_data", + "commit_message": "DOC: Added docstrings to fixtures defined in array module (#47211)", + "code": "def all_data(request, data, data_missing):\n \n if request.param == \"data\":\n return data\n elif request.param == \"data_missing\":\n return data_missing\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 39, + "n_words": 16, + "vocab_size": 13, + "complexity": 3, + "nloc": 5, + "token_counts": 28, + "n_ast_nodes": 49, + "n_identifiers": 5, + "d_id": 39882, + "documentation": { + "docstring": "Parametrized fixture returning 'data' or 'data_missing' float arrays.\n\n Used to test dtype conversion with and without missing values.\n ", + "n_words": 18, + "vocab_size": 18, + "n_whitespaces": 24, + "language": "en" + } + }, + { + "id": 168829, + "commit_id": "f77dbfb5af93faf9425c5d717a93ea7f6f26b3fd", + "repo": "pandas", + "path": "pandas/core/series.py", + "file_name": "series.py", + "fun_name": "iteritems", + "commit_message": "DOC: Add deprecation marks to deprecated functions (#48183)\n\n* DOC: Add deprecation marks to deprecated functions\r\n\r\n* Address docs\r\n\r\n* docstrings", + "code": "def iteritems(self) -> Iterable[tuple[Hashable, Any]]:\n \n warnings.warn(\n \"iteritems is deprecated and will be removed in a future version. \"\n \"Use .items instead.\",\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n return self.items()\n\n # ----------------------------------------------------------------------\n # Misc public methods\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 110, + "n_words": 32, + "vocab_size": 31, + "complexity": 1, + "nloc": 30, + "token_counts": 43, + "n_ast_nodes": 74, + "n_identifiers": 14, + "d_id": 40335, + "documentation": { + "docstring": "\n Lazily iterate over (index, value) tuples.\n\n .. deprecated:: 1.5.0\n iteritems is deprecated and will be removed in a future version.\n Use .items instead.\n\n This method returns an iterable tuple (index, value). 
This is\n convenient if you want to create a lazy iterator.\n\n Returns\n -------\n iterable\n Iterable of tuples containing the (index, value) pairs from a\n Series.\n\n See Also\n --------\n Series.items : Recommended alternative.\n DataFrame.items : Iterate over (column name, Series) pairs.\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.\n ", + "n_words": 81, + "vocab_size": 65, + "n_whitespaces": 217, + "language": "en" + } + }, + { + "id": 132382, + "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", + "repo": "ray", + "path": "python/ray/tune/tests/test_api.py", + "file_name": "test_api.py", + "fun_name": "checkAndReturnConsistentLogs", + "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", + "code": "def checkAndReturnConsistentLogs(self, results, sleep_per_iter=None):\n \n class_results = copy.deepcopy(results)\n function_results = copy.deepcopy(results)\n\n class_output = []\n function_output = []\n scheduler_notif = []\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 61, + "n_words": 19, + "vocab_size": 12, + "complexity": 4, + "nloc": 63, + "token_counts": 306, + "n_ast_nodes": 67, + "n_identifiers": 11, + "d_id": 29747, + "documentation": { + "docstring": "Checks logging is the same between APIs.\n\n Ignore \"DONE\" for logging but checks that the\n scheduler is notified properly with the last result.\n ", + "n_words": 23, + "vocab_size": 19, + "n_whitespaces": 44, + "language": "en" + } + }, + { + "id": 77078, + "commit_id": "1822d7eee23cf5fceff8b1f58f3ca2f0a32c6e34", + "repo": "wagtail", + "path": "wagtail/admin/tests/test_widgets.py", + "file_name": "test_widgets.py", + "fun_name": "test_tags_help_text_spaces_allowed", + "commit_message": "display help text message for tag field\n\n- resolves #1874\n- ensure message is dynamic based on the setting TAG_SPACES_ALLOWED\n- Update wagtail/admin/templates/wagtailadmin/widgets/tag_widget.html", + "code": "def test_tags_help_text_spaces_allowed(self):\n \n widget = widgets.AdminTagWidget()\n help_text = widget.get_context(None, None, {})[\"widget\"][\"help_text\"]\n\n html = widget.render(\"tags\", None, {})\n help_text_html_element = self.get_help_text_html_element(html)\n\n self.assertEqual(\n help_text,\n 'Multi-word tags with spaces will automatically be enclosed in double quotes (\").',\n )\n\n self.assertHTMLEqual(\n help_text_html_element,\n % help_text,\n )\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 146, + "n_words": 38, + "vocab_size": 32, + "complexity": 1, + "nloc": 13, + "token_counts": 73, + "n_ast_nodes": 123, + "n_identifiers": 13, + "d_id": 16620, + "documentation": { + "docstring": "Checks that the tags help text html element content is correct when TAG_SPACES_ALLOWED is True

%s

", + "n_words": 16, + "vocab_size": 15, + "n_whitespaces": 15, + "language": "en" + } + }, + { + "id": 181840, + "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", + "repo": "tpot", + "path": "tpot/base.py", + "file_name": "base.py", + "fun_name": "clean_pipeline_string", + "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", + "code": "def clean_pipeline_string(self, individual):\n \n dirty_string = str(individual)\n # There are many parameter prefixes in the pipeline strings, used solely for\n # making the terminal name unique, eg. LinearSVC__.\n parameter_prefixes = [\n (m.start(), m.end()) for m in re.finditer(\", [\\w]+__\", dirty_string)\n ]\n # We handle them in reverse so we do not mess up indices\n pretty = dirty_string\n for (start, end) in reversed(parameter_prefixes):\n pretty = pretty[: start + 2] + pretty[end:]\n\n return pretty\n", + "url": "https://github.com/EpistasisLab/tpot.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 162, + "n_words": 70, + "vocab_size": 55, + "complexity": 3, + "nloc": 9, + "token_counts": 74, + "n_ast_nodes": 120, + "n_identifiers": 13, + "d_id": 43614, + "documentation": { + "docstring": "Provide a string of the individual without the parameter prefixes.\n\n Parameters\n ----------\n individual: individual\n Individual which should be represented by a pretty string\n\n Returns\n -------\n A string like str(individual), but with parameter prefixes removed.\n\n ", + "n_words": 34, + "vocab_size": 28, + "n_whitespaces": 94, + "language": "en" + } + }, + { + "id": 301868, + "commit_id": "62a5854e40cb554fecb1eec897d7bcb4c94628fe", + "repo": "core", + "path": "homeassistant/components/feedreader/__init__.py", + "file_name": "__init__.py", + "fun_name": "put_timestamp", + "commit_message": "Fix bare except (#72906)", + "code": "def put_timestamp(self, feed_id, timestamp):\n \n self._fetch_data()\n with self._lock, open(self._data_file, \"wb\") as myfile:\n self._data.update({feed_id: timestamp})\n _LOGGER.debug(\n \"Overwriting feed %s timestamp in storage file %s\",\n feed_id,\n self._data_file,\n )\n try:\n pickle.dump(self._data, myfile)\n except Exception: # pylint: disable=broad-except\n _LOGGER.error(\"Error saving pickled data to %s\", self._data_file)\n self._cache_outdated = True\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 202, + "n_words": 43, + "vocab_size": 41, + "complexity": 2, + "nloc": 14, + "token_counts": 86, + "n_ast_nodes": 144, + "n_identifiers": 18, + "d_id": 100706, + "documentation": { + "docstring": "Update timestamp for given feed id (usually the url).", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 130998, + "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", + "repo": "ray", + "path": "python/ray/serve/tests/test_deployment_state.py", + "file_name": "test_deployment_state.py", + "fun_name": "test_deploy_with_transient_constructor_failure", + "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", + "code": "def test_deploy_with_transient_constructor_failure(mock_deployment_state):\n \n deployment_state, timer, goal_manager = mock_deployment_state\n\n b_info_1, b_version_1 = deployment_info(num_replicas=2)\n create_goal, updating 
= deployment_state.deploy(b_info_1)\n goal_obj = goal_manager.get_goal(create_goal)\n\n # Burn 4 retries from both replicas.\n deleted = _constructor_failure_loop_two_replica(deployment_state, 2)\n assert not deleted\n\n # Let both replicas succeed in last try.\n deployment_state.update()\n check_counts(deployment_state, total=2, by_state=[(ReplicaState.STARTING, 2)])\n\n assert deployment_state._replica_constructor_retry_counter == 4\n replica_1 = deployment_state._replicas.get()[0]\n replica_2 = deployment_state._replicas.get()[1]\n\n replica_1._actor.set_ready()\n replica_2._actor.set_ready()\n deployment_state.update()\n check_counts(deployment_state, total=2, by_state=[(ReplicaState.RUNNING, 2)])\n\n assert deployment_state._replica_constructor_retry_counter == 4\n assert goal_manager.check_complete(create_goal)\n assert goal_obj.exception is None\n\n\n@pytest.fixture", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "@pytest.fixture", + "n_ast_errors": 1, + "ast_levels": 11, + "n_whitespaces": 135, + "n_words": 73, + "vocab_size": 52, + "complexity": 1, + "nloc": 19, + "token_counts": 165, + "n_ast_nodes": 268, + "n_identifiers": 34, + "d_id": 29444, + "documentation": { + "docstring": "\n Test deploy() multiple replicas with transient constructor failure.\n Ensures:\n 1) Async goal manager can correctly recognize deployment goal as\n successful\n 2) There should be expected # of RUNNING replicas eventually that\n matches user intent\n 3) Replica counter set as -1 to stop tracking current goal as it's\n already completed\n\n Same testing for same test case in test_deploy.py.\n ", + "n_words": 57, + "vocab_size": 52, + "n_whitespaces": 124, + "language": "en" + } + }, + { + "id": 101978, + "commit_id": "2e8ef5e3c8f2df0f1cca9b342baa8aaa6f620650", + "repo": "faceswap", + "path": "lib/gui/utils/image.py", + "file_name": "image.py", + "fun_name": "image", + "commit_message": "GUI - Preview updates\n - Training preview. 
Embed preview pop-out window\n - Bugfix - convert/extract previews", + "code": "def image(self) -> ImageTk.PhotoImage:\n \n assert self._preview_image_tk is not None\n return self._preview_image_tk\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 7, + "n_whitespaces": 32, + "n_words": 11, + "vocab_size": 10, + "complexity": 1, + "nloc": 4, + "token_counts": 21, + "n_ast_nodes": 35, + "n_identifiers": 5, + "d_id": 21352, + "documentation": { + "docstring": ":class:`PIL.ImageTk.PhotoImage` The preview image for displaying in a tkinter canvas ", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 154556, + "commit_id": "e5b1888cd932909e49194d58035da34b210b91c4", + "repo": "modin", + "path": "modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py", + "file_name": "dataframe.py", + "fun_name": "_join_by_index", + "commit_message": "FEAT-#4946: Replace OmniSci with HDK (#4947)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", + "code": "def _join_by_index(self, other_modin_frames, how, sort, ignore_index):\n \n if how == \"outer\":\n raise NotImplementedError(\"outer join is not supported in HDK engine\")\n\n lhs = self._maybe_materialize_rowid()\n reset_index_names = False\n for rhs in other_modin_frames:\n rhs = rhs._maybe_materialize_rowid()\n if len(lhs._index_cols) != len(rhs._index_cols):\n raise NotImplementedError(\n \"join by indexes with different sizes is not supported\"\n )\n\n reset_index_names = reset_index_names or lhs._index_cols != rhs._index_cols\n\n condition = lhs._build_equi_join_condition(\n rhs, lhs._index_cols, rhs._index_cols\n )\n\n exprs = lhs._index_exprs()\n new_columns = lhs.columns.to_list()\n for col in lhs.columns:\n exprs[col] = lhs.ref(col)\n for col in rhs.columns:\n # Handle duplicating column names here. 
When user specifies\n # suffixes to make a join, actual renaming is done in front-end.\n new_col_name = col\n rename_idx = 0\n while new_col_name in exprs:\n new_col_name = f\"{col}{rename_idx}\"\n rename_idx += 1\n exprs[new_col_name] = rhs.ref(col)\n new_columns.append(new_col_name)\n\n op = JoinNode(\n lhs,\n rhs,\n how=how,\n exprs=exprs,\n condition=condition,\n )\n\n new_columns = Index.__new__(\n Index, data=new_columns, dtype=self.columns.dtype\n )\n lhs = lhs.__constructor__(\n dtypes=lhs._dtypes_for_exprs(exprs),\n columns=new_columns,\n index_cols=lhs._index_cols,\n op=op,\n force_execution_mode=self._force_execution_mode,\n )\n\n if sort:\n lhs = lhs.sort_rows(\n lhs._index_cols,\n ascending=True,\n ignore_index=False,\n na_position=\"last\",\n )\n\n if reset_index_names:\n lhs = lhs._reset_index_names()\n\n if ignore_index:\n new_columns = Index.__new__(RangeIndex, data=range(len(lhs.columns)))\n lhs = lhs._set_columns(new_columns)\n\n return lhs\n", + "url": "https://github.com/modin-project/modin.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 912, + "n_words": 171, + "vocab_size": 113, + "complexity": 11, + "nloc": 57, + "token_counts": 315, + "n_ast_nodes": 498, + "n_identifiers": 44, + "d_id": 36066, + "documentation": { + "docstring": "\n Perform equi-join operation for multiple frames by index columns.\n\n Parameters\n ----------\n other_modin_frames : list of HdkOnNativeDataframe\n Frames to join with.\n how : str\n A type of join.\n sort : bool\n Sort the result by join keys.\n ignore_index : bool\n If True then reset column index for the resulting frame.\n\n Returns\n -------\n HdkOnNativeDataframe\n The new frame.\n ", + "n_words": 55, + "vocab_size": 43, + "n_whitespaces": 188, + "language": "en" + } + }, + { + "id": 106288, + "commit_id": "92d73ef3936ed6de9770f613fddf2260731becc9", + "repo": "youtube-dl", + "path": "youtube_dl/downloader/__init__.py", + "file_name": "__init__.py", + "fun_name": "_get_suitable_downloader", + "commit_message": "[niconico] Implement heartbeat for download", + "code": "def _get_suitable_downloader(info_dict, params={}):\n \n\n # if (info_dict.get('start_time') or info_dict.get('end_time')) and not info_dict.get('requested_formats') and FFmpegFD.can_download(info_dict):\n # return FFmpegFD\n\n external_downloader = params.get('external_downloader')\n if external_downloader is not None:\n ed = get_external_downloader(external_downloader)\n if ed.can_download(info_dict):\n return ed\n\n protocol = info_dict['protocol']\n if protocol.startswith('m3u8') and info_dict.get('is_live'):\n return FFmpegFD\n\n if protocol == 'm3u8' and params.get('hls_prefer_native') is True:\n return HlsFD\n\n if protocol == 'm3u8_native' and params.get('hls_prefer_native') is False:\n return FFmpegFD\n\n return PROTOCOL_MAP.get(protocol, HttpFD)\n\n\n__all__ = [\n 'get_suitable_downloader',\n 'FileDownloader',\n]\n", + "url": "https://github.com/ytdl-org/youtube-dl.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 153, + "n_words": 69, + "vocab_size": 40, + "complexity": 9, + "nloc": 14, + "token_counts": 105, + "n_ast_nodes": 200, + "n_identifiers": 15, + "d_id": 22343, + "documentation": { + "docstring": "Get the downloader class that can handle the info dict.", + "n_words": 10, + "vocab_size": 9, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 93148, + "commit_id": "d3b8c9dd7bef6bccb5e70d2ccf3cda8463444a34", + "repo": 
"sentry", + "path": "tests/snuba/api/endpoints/test_organization_events_mep.py", + "file_name": "test_organization_events_mep.py", + "fun_name": "test_having_condition_with_preventing_aggregate_metrics_only", + "commit_message": "chore(discover): Cleanup events tests (#36797)\n\n- Delete the deprecated eventsv2 tests\r\n- Move MEP tests to its own file", + "code": "def test_having_condition_with_preventing_aggregate_metrics_only(self):\n \n response = self.do_request(\n {\n \"field\": [\"transaction\", \"project\", \"p50(transaction.duration)\"],\n \"query\": \"event.type:transaction p50(transaction.duration):<50\",\n \"dataset\": \"metrics\",\n \"preventMetricAggregates\": \"1\",\n \"per_page\": 50,\n \"project\": self.project.id,\n }\n )\n assert response.status_code == 400, response.content\n", + "url": "https://github.com/getsentry/sentry.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 168, + "n_words": 28, + "vocab_size": 28, + "complexity": 1, + "nloc": 12, + "token_counts": 59, + "n_ast_nodes": 109, + "n_identifiers": 8, + "d_id": 18970, + "documentation": { + "docstring": "same as the previous test, but with the dataset on explicit metrics\n which should throw a 400 error instead", + "n_words": 19, + "vocab_size": 18, + "n_whitespaces": 25, + "language": "en" + } + }, + { + "id": 185184, + "commit_id": "3f0955cbe5405bdb3d1dda756ee3a1e000695dff", + "repo": "textual", + "path": "tests/test_xterm_parser.py", + "file_name": "test_xterm_parser.py", + "fun_name": "test_unknown_sequence_followed_by_known_sequence", + "commit_message": "fix tests", + "code": "def test_unknown_sequence_followed_by_known_sequence(parser, chunk_size):\n \n unknown_sequence = \"\\x1b[?\"\n known_sequence = \"\\x1b[8~\" # key = 'end'\n\n sequence = unknown_sequence + known_sequence\n\n events = []\n parser.more_data = lambda: True\n for chunk in chunks(sequence, chunk_size):\n events.append(parser.feed(chunk))\n\n events = list(itertools.chain.from_iterable(list(event) for event in events))\n\n assert [event.key for event in events] == [\n \"circumflex_accent\",\n \"left_square_bracket\",\n \"question_mark\",\n \"end\",\n ]\n\n", + "url": "https://github.com/Textualize/textual.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 117, + "n_words": 51, + "vocab_size": 36, + "complexity": 4, + "nloc": 15, + "token_counts": 92, + "n_ast_nodes": 156, + "n_identifiers": 18, + "d_id": 44934, + "documentation": { + "docstring": "When we feed the parser an unknown sequence followed by a known\n sequence. 
The characters in the unknown sequence are delivered as keys,\n and the known escape sequence that follows is delivered as expected.\n ", + "n_words": 34, + "vocab_size": 26, + "n_whitespaces": 43, + "language": "en" + } + }, + { + "id": 82563, + "commit_id": "b8750ebc0ebaa52ec51945f1d4824a80d806f479", + "repo": "django-cms", + "path": "cms/tests/test_toolbar.py", + "file_name": "test_toolbar.py", + "fun_name": "test_block_tag", + "commit_message": "ci: sync isort line length (#7353)", + "code": "def test_block_tag(self):\n user = self.get_staff()\n page = create_page('Test', 'col_two.html', 'en', published=True)\n ex1 = Example1(\n date_field=datetime.date(2012, 1, 1),\n **FOUR_CHARS\n )\n ex1.save()\n\n # This template does not render anything as content is saved in a\n # variable and never inserted in the page\n template_text = \n request = self.get_page_request(page, user, edit=True)\n response = detail_view(request, ex1.pk, template_string=template_text)\n self.assertNotContains(\n response,\n ''\n ''\n ''.format(\n 'placeholderapp', 'example1', ex1.pk\n )\n )\n\n # This template does not render anything as content is saved in a\n # variable and inserted in the page afterwards\n template_text = \n request = self.get_page_request(page, user, edit=True)\n response = detail_view(request, ex1.pk, template_string=template_text)\n # Assertions on the content of the block tag\n self.assertContains(\n response,\n ''.format(\n 'placeholderapp', 'example1', ex1.pk\n )\n )\n self.assertContains(\n response,\n \"edit_plugin: '{}?language={}&edit_fields=changelist'\".format(\n admin_reverse('placeholderapp_example1_changelist'), 'en'\n )\n )\n", + "url": "https://github.com/django-cms/django-cms.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 921, + "n_words": 254, + "vocab_size": 97, + "complexity": 1, + "nloc": 114, + "token_counts": 379, + "n_ast_nodes": 672, + "n_identifiers": 31, + "d_id": 17440, + "documentation": { + "docstring": "{% extends \"base.html\" %}\n{% load cms_tags %}\n\n{% block content %}\n{% render_model_block instance as rendered_model %}\n {{ instance }}\n

{{ instance.char_1 }} - {{ instance.char_2 }}

\n {{ instance.date_field|date:\"Y\" }}\n {% if instance.char_1 %}\n successful if\n {% endif %}\n{% endrender_model_block %}\n{% endblock content %}\n{% extends \"base.html\" %}\n{% load cms_tags %}\n\n{% block content %}\n{% render_model_block instance as rendered_model %}\n {{ instance }}\n

{{ instance.char_1 }} - {{ instance.char_2 }}

\n {{ instance.date_field|date:\"Y\" }}\n {% if instance.char_1 %}\n successful if\n {% endif %}\n{% endrender_model_block %}\n{{ rendered_model }}\n{% endblock content %}\n{% extends \"base.html\" %}\n{% load cms_tags %}\n\n{% block content %}\n{% render_model_block instance %}\n {{ instance }}\n

{{ instance.char_1 }} - {{ instance.char_2 }}

\n {{ instance.date_field|date:\"Y\" }}\n {% if instance.char_1 %}\n successful if\n {% endif %}\n{% endrender_model_block %}\n{% endblock content %}\n{% extends \"base.html\" %}\n{% load cms_tags %}\n\n{% block content %}\n{% render_model_block instance 'changelist' %}\n {{ instance }}\n{% endrender_model_block %}\n{% endblock content %}\n", + "n_words": 186, + "vocab_size": 35, + "n_whitespaces": 221, + "language": "en" + } + }, + { + "id": 251848, + "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", + "repo": "mitmproxy", + "path": "test/mitmproxy/proxy/layers/http/test_http.py", + "file_name": "test_http.py", + "fun_name": "test_connect_more_newlines", + "commit_message": "make it black!", + "code": "def test_connect_more_newlines(tctx):\n \n server = Placeholder(Server)\n playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))\n nl = Placeholder(layer.NextLayer)\n\n assert (\n playbook\n >> DataReceived(tctx.client, b\"CONNECT example.com:80 HTTP/1.1\\r\\n\\r\\n\\r\\n\")\n << http.HttpConnectHook(Placeholder())\n >> reply()\n << OpenConnection(server)\n >> reply(None)\n << SendData(tctx.client, b\"HTTP/1.1 200 Connection established\\r\\n\\r\\n\")\n >> DataReceived(tctx.client, b\"\\x16\\x03\\x03\\x00\\xb3\\x01\\x00\\x00\\xaf\\x03\\x03\")\n << layer.NextLayerHook(nl)\n )\n assert nl().data_client() == b\"\\x16\\x03\\x03\\x00\\xb3\\x01\\x00\\x00\\xaf\\x03\\x03\"\n\n", + "url": "https://github.com/mitmproxy/mitmproxy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 128, + "n_words": 44, + "vocab_size": 33, + "complexity": 1, + "nloc": 16, + "token_counts": 118, + "n_ast_nodes": 212, + "n_identifiers": 22, + "d_id": 73851, + "documentation": { + "docstring": "Ignore superfluous \\r\\n in CONNECT request, https://github.com/mitmproxy/mitmproxy/issues/4870", + "n_words": 7, + "vocab_size": 7, + "n_whitespaces": 6, + "language": "en" + } + }, + { + "id": 204842, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/db/backends/base/creation.py", + "file_name": "creation.py", + "fun_name": "set_as_test_mirror", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def set_as_test_mirror(self, primary_settings_dict):\n \n self.connection.settings_dict[\"NAME\"] = primary_settings_dict[\"NAME\"]\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 20, + "n_words": 6, + "vocab_size": 6, + "complexity": 1, + "nloc": 2, + "token_counts": 21, + "n_ast_nodes": 38, + "n_identifiers": 5, + "d_id": 50921, + "documentation": { + "docstring": "\n Set this database up to be used in testing as a mirror of a primary\n database whose settings are given.\n ", + "n_words": 20, + "vocab_size": 18, + "n_whitespaces": 42, + "language": "en" + } + }, + { + "id": 212474, + "commit_id": "528d85e642340ef30ec91f30b65c7c43370f648d", + "repo": "bokeh", + "path": "bokeh/core/has_props.py", + "file_name": "has_props.py", + "fun_name": "themed_values", + "commit_message": "Normalize built-in types and remove `Unknown` (#12252)\n\n* Use lower case names for built-in types\r\n\r\nAlso incidentally apply TypeAlias marker.\r\n\r\n* Drop `Unknown` in favour of consistent usage of `Any`\r\n\r\n* Enable lazy annotations in conftest.py", + "code": "def themed_values(self) -> dict[str, Any] | None:\n \n return getattr(self, '__themed_values__', None)\n", + "url": 
"https://github.com/bokeh/bokeh.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 25, + "n_words": 11, + "vocab_size": 11, + "complexity": 1, + "nloc": 11, + "token_counts": 24, + "n_ast_nodes": 39, + "n_identifiers": 6, + "d_id": 53247, + "documentation": { + "docstring": " Get any theme-provided overrides.\n\n Results are returned as a dict from property name to value, or\n ``None`` if no theme overrides any values for this instance.\n\n Returns:\n dict or None\n\n ", + "n_words": 30, + "vocab_size": 27, + "n_whitespaces": 70, + "language": "en" + } + }, + { + "id": 217867, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/http/server.py", + "file_name": "server.py", + "fun_name": "is_cgi", + "commit_message": "add python 3.10.4 for windows", + "code": "def is_cgi(self):\n \n collapsed_path = _url_collapse_path(self.path)\n dir_sep = collapsed_path.find('/', 1)\n while dir_sep > 0 and not collapsed_path[:dir_sep] in self.cgi_directories:\n dir_sep = collapsed_path.find('/', dir_sep+1)\n if dir_sep > 0:\n head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]\n self.cgi_info = head, tail\n return True\n return False\n\n\n cgi_directories = ['/cgi-bin', '/htbin']\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 132, + "n_words": 43, + "vocab_size": 30, + "complexity": 4, + "nloc": 10, + "token_counts": 85, + "n_ast_nodes": 153, + "n_identifiers": 11, + "d_id": 54974, + "documentation": { + "docstring": "Test whether self.path corresponds to a CGI script.\n\n Returns True and updates the cgi_info attribute to the tuple\n (dir, rest) if self.path requires running a CGI script.\n Returns False otherwise.\n\n If any exception is raised, the caller should assume that\n self.path was rejected as invalid and act accordingly.\n\n The default implementation tests whether the normalized url\n path begins with one of the strings in self.cgi_directories\n (and the next character is a '/' or the end of the string).\n\n ", + "n_words": 78, + "vocab_size": 59, + "n_whitespaces": 141, + "language": "en" + } + }, + { + "id": 200603, + "commit_id": "69baa8d90fe079b799a80c8c06735c3ebd4bfe33", + "repo": "sympy", + "path": "sympy/algebras/quaternion.py", + "file_name": "quaternion.py", + "fun_name": "to_euler", + "commit_message": "added reference", + "code": "def to_euler(self, seq):\n \n extrinsic = _check_sequence(seq)\n i, j, k = seq.lower()\n i = _elementary_axis_index(i)\n j = _elementary_axis_index(j)\n k = _elementary_axis_index(k)\n\n if not extrinsic:\n i, k = k, i\n\n # check if sequence is symmetric\n symmetric = i == k\n if symmetric:\n k = 6 - i - j\n\n # parity of the permutation\n sign = (i - j) * (j - k) * (k - i) // 2\n\n # permutate elements\n elements = [self.a, self.b, self.c, self.d]\n a = elements[0]\n b = elements[i]\n c = elements[j]\n d = elements[k] * sign\n\n if not symmetric:\n a, b, c, d = a - c, b + d, c + a, d - b\n\n # calculate angles\n half_sum = atan2(b, a)\n half_diff = atan2(d, c)\n\n angle_2 = 2*atan2(sqrt(c*c + d*d), sqrt(a*a + b*b))\n # alternatively, we can use this to avoid the square root:\n # angle_2 = acos(2*(a*a + b*b)/(a*a + b*b + c*c + d*d) - 1)\n\n angle_1 = half_sum + half_diff\n angle_3 = half_sum - half_diff\n\n if extrinsic:\n angle_1, angle_3 = angle_3, angle_1\n\n # for Tait-Bryan angles\n if not symmetric:\n angle_2 -= pi / 
2\n if extrinsic:\n angle_3 *= sign\n else:\n angle_1 *= sign\n\n return Matrix([angle_1, angle_2, angle_3])\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 521, + "n_words": 197, + "vocab_size": 104, + "complexity": 7, + "nloc": 33, + "token_counts": 258, + "n_ast_nodes": 404, + "n_identifiers": 26, + "d_id": 49727, + "documentation": { + "docstring": "Returns Euler angles representing same in the sequence given by\n `seq`. This implements the method described in [1]_.\n\n Parameters\n ==========\n\n seq : string of length 3\n Represents the sequence of rotations.\n For intrinsic rotations, seq but be all lowercase and its elements\n must be from the set `['x', 'y', 'z']`\n For extrinsic rotations, seq but be all uppercase and its elements\n must be from the set `['X', 'Y', 'Z']`\n\n Returns\n =======\n\n Matrix\n The Euler angles calculated from the quaternion\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy.abc import a, b, c, d\n >>> euler = Quaternion(a, b, c, d).to_euler('zyz')\n >>> euler\n Matrix([[-atan2(-b, c) + atan2(d, a)],\n [2*atan2(sqrt(b**2 + c**2), sqrt(a**2 + d**2))],\n [atan2(-b, c) + atan2(d, a)]])\n\n References\n ==========\n\n .. [1] https://doi.org/10.1371/journal.pone.0276302\n\n ", + "n_words": 124, + "vocab_size": 82, + "n_whitespaces": 346, + "language": "en" + } + }, + { + "id": 144040, + "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", + "repo": "ray", + "path": "rllib/utils/metrics/window_stat.py", + "file_name": "window_stat.py", + "fun_name": "quantiles", + "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", + "code": "def quantiles(self) -> np.ndarray:\n \n if not self.count:\n return np.ndarray([], dtype=np.float32)\n else:\n return np.nanpercentile(\n self.items[: self.count], [0, 10, 50, 90, 100]\n ).tolist()\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 90, + "n_words": 21, + "vocab_size": 20, + "complexity": 2, + "nloc": 8, + "token_counts": 63, + "n_ast_nodes": 97, + "n_identifiers": 10, + "d_id": 33098, + "documentation": { + "docstring": "Returns ndarray with 0, 10, 50, 90, and 100 percentiles.", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 45916, + "commit_id": "401419432082d222b823e4f2a66f21e5cc3ab28d", + "repo": "airflow", + "path": "tests/providers/databricks/operators/test_databricks_sql.py", + "file_name": "test_databricks_sql.py", + "fun_name": "test_copy_with_credential", + "commit_message": "Add new options to DatabricksCopyIntoOperator (#22076)\n\nThis includes:\r\n* `encryption` - to specify encryption options for a given location\r\n* `credential` - to specify authentication options for a given location\r\n* `validate` - to control validation of schema & data", + "code": "def test_copy_with_credential(self):\n expression = \"col1, col2\"\n op = DatabricksCopyIntoOperator(\n file_location=COPY_FILE_LOCATION,\n file_format='CSV',\n table_name='test',\n task_id=TASK_ID,\n expression_list=expression,\n credential={'AZURE_SAS_TOKEN': 'abc'},\n )\n assert (\n op._create_sql_query()\n == f.strip()\n )\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 145, 
+ "n_words": 23, + "vocab_size": 21, + "complexity": 1, + "nloc": 17, + "token_counts": 56, + "n_ast_nodes": 102, + "n_identifiers": 15, + "d_id": 8743, + "documentation": { + "docstring": "COPY INTO test\nFROM (SELECT {expression} FROM '{COPY_FILE_LOCATION}' WITH (CREDENTIAL (AZURE_SAS_TOKEN = 'abc') ))\nFILEFORMAT = CSV\n", + "n_words": 17, + "vocab_size": 15, + "n_whitespaces": 14, + "language": "en" + } + }, + { + "id": 311454, + "commit_id": "58b8c30221a6f6e5acbbe98b7e3298b03fb741f5", + "repo": "core", + "path": "tests/components/homekit_controller/test_button.py", + "file_name": "test_button.py", + "fun_name": "test_press_button", + "commit_message": "Improve homekit_controller tests (#65266)", + "code": "async def test_press_button(hass):\n \n helper = await setup_test_component(hass, create_switch_with_setup_button)\n\n # Helper will be for the primary entity, which is the outlet. Make a helper for the button.\n button = Helper(\n hass,\n \"button.testdevice_setup\",\n helper.pairing,\n helper.accessory,\n helper.config_entry,\n )\n\n await hass.services.async_call(\n \"button\",\n \"press\",\n {\"entity_id\": \"button.testdevice_setup\"},\n blocking=True,\n )\n button.async_assert_service_values(\n ServicesTypes.OUTLET,\n {\n CharacteristicsTypes.Vendor.HAA_SETUP: \"#HAA@trcmd\",\n },\n )\n\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 172, + "n_words": 50, + "vocab_size": 42, + "complexity": 1, + "nloc": 21, + "token_counts": 78, + "n_ast_nodes": 129, + "n_identifiers": 19, + "d_id": 110119, + "documentation": { + "docstring": "Test a switch service that has a button characteristic is correctly handled.", + "n_words": 12, + "vocab_size": 11, + "n_whitespaces": 11, + "language": "en" + } + }, + { + "id": 131108, + "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", + "repo": "ray", + "path": "python/ray/tests/aws/utils/stubs.py", + "file_name": "stubs.py", + "fun_name": "describe_an_sg_2", + "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", + "code": "def describe_an_sg_2(ec2_client_stub, security_group):\n \n ec2_client_stub.add_response(\n \"describe_security_groups\",\n expected_params={\"GroupIds\": [security_group[\"GroupId\"]]},\n service_response={\"SecurityGroups\": [security_group]},\n )\n\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 40, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 6, + "token_counts": 38, + "n_ast_nodes": 66, + "n_identifiers": 6, + "d_id": 29476, + "documentation": { + "docstring": "Same as last function, different input param format.\n\n A call with this input parameter format is made when sg.ip_permissions is\n accessed in aws/config.py.\n ", + "n_words": 23, + "vocab_size": 21, + "n_whitespaces": 32, + "language": "en" + } + }, + { + "id": 40221, + "commit_id": "c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c", + "repo": "dash", + "path": "dash/testing/browser.py", + "file_name": "browser.py", + "fun_name": "wait_for_style_to_equal", + "commit_message": "f-strings everywhere! 
fffff", + "code": "def wait_for_style_to_equal(self, selector, style, val, timeout=None):\n \n return self._wait_for(\n method=style_to_equal,\n args=(selector, style, val),\n timeout=timeout,\n msg=f\"style val => {style} {val} not found within {timeout or self._wait_timeout}s\",\n )\n", + "url": "https://github.com/plotly/dash.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 90, + "n_words": 25, + "vocab_size": 24, + "complexity": 1, + "nloc": 7, + "token_counts": 45, + "n_ast_nodes": 81, + "n_identifiers": 12, + "d_id": 7360, + "documentation": { + "docstring": "Explicit wait until the element's style has expected `value` timeout\n if not set, equals to the fixture's `wait_timeout` shortcut to\n `WebDriverWait` with customized `style_to_equal` condition.", + "n_words": 25, + "vocab_size": 23, + "n_whitespaces": 38, + "language": "en" + } + }, + { + "id": 48672, + "commit_id": "c10f2266222c434485889b08cc1463acdb8fa169", + "repo": "django-rest-framework", + "path": "rest_framework/renderers.py", + "file_name": "renderers.py", + "fun_name": "get_rendered_html_form", + "commit_message": "Refactor: Replace try/except with contextlib.suppress() (#8676)", + "code": "def get_rendered_html_form(self, data, view, method, request):\n \n # See issue #2089 for refactoring this.\n serializer = getattr(data, 'serializer', None)\n if serializer and not getattr(serializer, 'many', False):\n instance = getattr(serializer, 'instance', None)\n if isinstance(instance, Page):\n instance = None\n else:\n instance = None\n\n # If this is valid serializer data, and the form is for the same\n # HTTP method as was used in the request then use the existing\n # serializer instance, rather than dynamically creating a new one.\n if request.method == method and serializer is not None:\n try:\n kwargs = {'data': request.data}\n except ParseError:\n kwargs = {}\n existing_serializer = serializer\n else:\n kwargs = {}\n existing_serializer = None\n\n with override_method(view, request, method) as request:\n if not self.show_form_for_method(view, method, request, instance):\n return\n\n if method in ('DELETE', 'OPTIONS'):\n return True # Don't actually need to return a form\n\n has_serializer = getattr(view, 'get_serializer', None)\n has_serializer_class = getattr(view, 'serializer_class', None)\n\n if (\n (not has_serializer and not has_serializer_class) or\n not any(is_form_media_type(parser.media_type) for parser in view.parser_classes)\n ):\n return\n\n if existing_serializer is not None:\n with contextlib.suppress(TypeError):\n return self.render_form_for_serializer(existing_serializer)\n if has_serializer:\n if method in ('PUT', 'PATCH'):\n serializer = view.get_serializer(instance=instance, **kwargs)\n else:\n serializer = view.get_serializer(**kwargs)\n else:\n # at this point we must have a serializer_class\n if method in ('PUT', 'PATCH'):\n serializer = self._get_serializer(view.serializer_class, view,\n request, instance=instance, **kwargs)\n else:\n serializer = self._get_serializer(view.serializer_class, view,\n request, **kwargs)\n\n return self.render_form_for_serializer(serializer)\n", + "url": "https://github.com/encode/django-rest-framework.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 902, + "n_words": 215, + "vocab_size": 111, + "complexity": 17, + "nloc": 45, + "token_counts": 308, + "n_ast_nodes": 503, + "n_identifiers": 30, + "d_id": 9563, + "documentation": { + "docstring": 
"\n Return a string representing a rendered HTML form, possibly bound to\n either the input or output data.\n\n In the absence of the View having an associated form then return None.\n ", + "n_words": 30, + "vocab_size": 27, + "n_whitespaces": 59, + "language": "en" + } + }, + { + "id": 276385, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/testing_infra/test_utils.py", + "file_name": "test_utils.py", + "fun_name": "get_expected_metric_variable_names", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def get_expected_metric_variable_names(var_names, name_suffix=\"\"):\n \n if tf.__internal__.tf2.enabled() or tf.executing_eagerly():\n # In V1 eager mode and V2 variable names are not made unique.\n return [n + \":0\" for n in var_names]\n # In V1 graph mode variable names are made unique using a suffix.\n return [n + name_suffix + \":0\" for n in var_names]\n\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 77, + "n_words": 51, + "vocab_size": 34, + "complexity": 5, + "nloc": 4, + "token_counts": 49, + "n_ast_nodes": 85, + "n_identifiers": 9, + "d_id": 81647, + "documentation": { + "docstring": "Returns expected metric variable names given names and prefix/suffix.", + "n_words": 9, + "vocab_size": 8, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 207811, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "tests/admin_views/tests.py", + "file_name": "tests.py", + "fun_name": "test_incorrect_lookup_parameters", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def test_incorrect_lookup_parameters(self):\n \n changelist_url = reverse(\"admin:admin_views_thing_changelist\")\n response = self.client.get(changelist_url, {\"notarealfield\": \"5\"})\n self.assertRedirects(response, \"%s?e=1\" % changelist_url)\n\n # Spanning relationships through a nonexistent related object (Refs #16716)\n response = self.client.get(changelist_url, {\"notarealfield__whatever\": \"5\"})\n self.assertRedirects(response, \"%s?e=1\" % changelist_url)\n\n response = self.client.get(\n changelist_url, {\"color__id__exact\": \"StringNotInteger!\"}\n )\n self.assertRedirects(response, \"%s?e=1\" % changelist_url)\n\n # Regression test for #18530\n response = self.client.get(changelist_url, {\"pub_date__gte\": \"foo\"})\n self.assertRedirects(response, \"%s?e=1\" % changelist_url)\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 160, + "n_words": 58, + "vocab_size": 35, + "complexity": 1, + "nloc": 12, + "token_counts": 116, + "n_ast_nodes": 207, + "n_identifiers": 8, + "d_id": 52112, + "documentation": { + "docstring": "Ensure incorrect lookup parameters are handled gracefully.", + "n_words": 7, + "vocab_size": 7, + "n_whitespaces": 6, + "language": "en" + } + }, + { + "id": 101759, + "commit_id": "c79175cbde5600bebd65785f3821fc74b3a80cbe", + "repo": "faceswap", + "path": "tools/alignments/media.py", + "file_name": "media.py", + "fun_name": "load_items", + "commit_message": "Alignments Tool updates\n - Copy info back to alignments file from faces", + "code": "def load_items(self) -> Dict[str, List[int]]:\n \n faces: Dict[str, List[int]] = {}\n for face in cast(List[Tuple[str, \"PNGHeaderDict\"]], self.file_list_sorted):\n src = 
face[1][\"source\"]\n faces.setdefault(src[\"source_filename\"], []).append(src[\"face_index\"])\n logger.trace(faces) # type: ignore\n return faces\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 85, + "n_words": 27, + "vocab_size": 25, + "complexity": 2, + "nloc": 14, + "token_counts": 87, + "n_ast_nodes": 138, + "n_identifiers": 16, + "d_id": 21163, + "documentation": { + "docstring": " Load the face names into dictionary.\n\n Returns\n -------\n dict\n The source filename as key with list of face indices for the frame as value\n ", + "n_words": 24, + "vocab_size": 21, + "n_whitespaces": 64, + "language": "en" + } + }, + { + "id": 200153, + "commit_id": "9b2351534f8f02bcd5b9691d5e7a06150685beca", + "repo": "sympy", + "path": "sympy/matrices/common.py", + "file_name": "common.py", + "fun_name": "row_join", + "commit_message": "Make ShapeError more readable", + "code": "def row_join(self, other):\n \n # A null matrix can always be stacked (see #10770)\n if self.cols == 0 and self.rows != other.rows:\n return self._new(other.rows, 0, []).row_join(other)\n\n if self.rows != other.rows:\n raise ShapeError(\n \"The matrices have incompatible number of rows ({} and {})\"\n .format(self.rows, other.rows))\n return self._eval_row_join(other)\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 133, + "n_words": 45, + "vocab_size": 39, + "complexity": 4, + "nloc": 8, + "token_counts": 74, + "n_ast_nodes": 118, + "n_identifiers": 9, + "d_id": 49553, + "documentation": { + "docstring": "Concatenates two matrices along self's last and rhs's first column\n\n Examples\n ========\n\n >>> from sympy import zeros, ones\n >>> M = zeros(3)\n >>> V = ones(3, 1)\n >>> M.row_join(V)\n Matrix([\n [0, 0, 0, 1],\n [0, 0, 0, 1],\n [0, 0, 0, 1]])\n\n See Also\n ========\n\n row\n col_join\n ", + "n_words": 47, + "vocab_size": 34, + "n_whitespaces": 152, + "language": "en" + } + }, + { + "id": 201335, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "tests/auth_tests/test_management.py", + "file_name": "test_management.py", + "fun_name": "test_permission_with_proxy_content_type_created", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def test_permission_with_proxy_content_type_created(self):\n \n opts = UserProxy._meta\n codename = get_permission_codename(\"add\", opts)\n self.assertTrue(\n Permission.objects.filter(\n content_type__model=opts.model_name,\n content_type__app_label=opts.app_label,\n codename=codename,\n ).exists()\n )\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 118, + "n_words": 16, + "vocab_size": 15, + "complexity": 1, + "nloc": 10, + "token_counts": 51, + "n_ast_nodes": 83, + "n_identifiers": 16, + "d_id": 49919, + "documentation": { + "docstring": "\n A proxy model's permissions use its own content type rather than the\n content type of the concrete model.\n ", + "n_words": 18, + "vocab_size": 15, + "n_whitespaces": 40, + "language": "en" + } + }, + { + "id": 24519, + "commit_id": "ddaa2c2552e19635cd6cdf38619f1f176c358f89", + "repo": "PaddleOCR", + "path": "ppstructure/table/table_master_match.py", + "file_name": "table_master_match.py", + "fun_name": "get_bboxes_list", + "commit_message": "add SLANet", 
+ "code": "def get_bboxes_list(end2end_result, structure_master_result):\n \n # end2end\n end2end_xyxy_list = []\n end2end_xywh_list = []\n for end2end_item in end2end_result:\n src_bbox = end2end_item['bbox']\n end2end_xyxy_list.append(src_bbox)\n xywh_bbox = xyxy2xywh(src_bbox)\n end2end_xywh_list.append(xywh_bbox)\n end2end_xyxy_bboxes = np.array(end2end_xyxy_list)\n end2end_xywh_bboxes = np.array(end2end_xywh_list)\n\n # structure master\n src_bboxes = structure_master_result['bbox']\n src_bboxes = remove_empty_bboxes(src_bboxes)\n # structure_master_xywh_bboxes = src_bboxes\n # xyxy_bboxes = xywh2xyxy(src_bboxes)\n # structure_master_xyxy_bboxes = xyxy_bboxes\n structure_master_xyxy_bboxes = src_bboxes\n xywh_bbox = xyxy2xywh(src_bboxes)\n structure_master_xywh_bboxes = xywh_bbox\n\n return end2end_xyxy_bboxes, end2end_xywh_bboxes, structure_master_xywh_bboxes, structure_master_xyxy_bboxes\n\n", + "url": "https://github.com/PaddlePaddle/PaddleOCR.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 143, + "n_words": 64, + "vocab_size": 37, + "complexity": 2, + "nloc": 16, + "token_counts": 93, + "n_ast_nodes": 159, + "n_identifiers": 18, + "d_id": 4761, + "documentation": { + "docstring": "\n This function is use to convert end2end results and structure master results to\n List of xyxy bbox format and List of xywh bbox format\n :param end2end_result: bbox's format is xyxy\n :param structure_master_result: bbox's format is xywh\n :return: 4 kind list of bbox ()\n ", + "n_words": 43, + "vocab_size": 26, + "n_whitespaces": 62, + "language": "en" + } + }, + { + "id": 212647, + "commit_id": "ef3746cb06a9ee6bc93bc3c163ba961fd1b9c413", + "repo": "PySimpleGUI", + "path": "PySimpleGUI.py", + "file_name": "PySimpleGUI.py", + "fun_name": "make_modal", + "commit_message": "set_options - new parm disable_modal_windows provides ability to disable modal setting for all windows including popups", + "code": "def make_modal(self):\n \n if not self._is_window_created('tried Window.make_modal'):\n return\n\n if running_mac() and ENABLE_MAC_MODAL_DISABLE_PATCH:\n return\n\n # if modal windows have been disabled globally\n if not DEFAULT_MODAL_WINDOWS_ENABLED:\n return\n\n try:\n self.TKroot.transient()\n self.TKroot.grab_set()\n self.TKroot.focus_force()\n except Exception as e:\n print('Exception trying to make modal', e)\n", + "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 164, + "n_words": 38, + "vocab_size": 32, + "complexity": 6, + "nloc": 13, + "token_counts": 63, + "n_ast_nodes": 116, + "n_identifiers": 13, + "d_id": 53310, + "documentation": { + "docstring": "\n Makes a window into a \"Modal Window\"\n This means user will not be able to interact with other windows until this one is closed\n\n NOTE - Sorry Mac users - you can't have modal windows.... 
lobby your tkinter Mac devs\n ", + "n_words": 40, + "vocab_size": 37, + "n_whitespaces": 69, + "language": "en" + } + }, + { + "id": 179703, + "commit_id": "9e4541822770333ab5191bc01aa3edc9738f17ff", + "repo": "gradio", + "path": "gradio/components.py", + "file_name": "components.py", + "fun_name": "set_interpret_parameters", + "commit_message": "Blocks-Components\n- move audio component", + "code": "def set_interpret_parameters(self, segments=8):\n \n self.interpretation_segments = segments\n return self\n", + "url": "https://github.com/gradio-app/gradio.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 7, + "n_whitespaces": 29, + "n_words": 8, + "vocab_size": 8, + "complexity": 1, + "nloc": 3, + "token_counts": 17, + "n_ast_nodes": 29, + "n_identifiers": 4, + "d_id": 43002, + "documentation": { + "docstring": "\n Calculates interpretation score of audio subsections by splitting the audio into subsections, then using a \"leave one out\" method to calculate the score of each subsection by removing the subsection and measuring the delta of the output value.\n Parameters:\n segments (int): Number of interpretation segments to split audio into.\n ", + "n_words": 49, + "vocab_size": 34, + "n_whitespaces": 78, + "language": "en" + } + }, + { + "id": 167592, + "commit_id": "f538568afc2c76c2d738d32e3544cf9fe6742960", + "repo": "pandas", + "path": "pandas/_testing/contexts.py", + "file_name": "contexts.py", + "fun_name": "with_csv_dialect", + "commit_message": "TYP: misc return type annotations (#47558)", + "code": "def with_csv_dialect(name, **kwargs) -> Iterator[None]:\n \n import csv\n\n _BUILTIN_DIALECTS = {\"excel\", \"excel-tab\", \"unix\"}\n\n if name in _BUILTIN_DIALECTS:\n raise ValueError(\"Cannot override builtin dialect.\")\n\n csv.register_dialect(name, **kwargs)\n try:\n yield\n finally:\n csv.unregister_dialect(name)\n\n\n@contextmanager", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "@contextmanager", + "n_ast_errors": 1, + "ast_levels": 10, + "n_whitespaces": 69, + "n_words": 28, + "vocab_size": 27, + "complexity": 3, + "nloc": 28, + "token_counts": 55, + "n_ast_nodes": 103, + "n_identifiers": 10, + "d_id": 40048, + "documentation": { + "docstring": "\n Context manager to temporarily register a CSV dialect for parsing CSV.\n\n Parameters\n ----------\n name : str\n The name of the dialect.\n kwargs : mapping\n The parameters for the dialect.\n\n Raises\n ------\n ValueError : the name of the dialect conflicts with a builtin one.\n\n See Also\n --------\n csv : Python's CSV library.\n ", + "n_words": 51, + "vocab_size": 36, + "n_whitespaces": 102, + "language": "en" + } + }, + { + "id": 42077, + "commit_id": "6460a21555ba6557e1f6f06f4d677d9c19148169", + "repo": "seaborn", + "path": "seaborn/utils.py", + "file_name": "utils.py", + "fun_name": "adjust_legend_subtitles", + "commit_message": "Workaround for matplotlib rc_context issue (#2925)\n\n* Workaround for matplotlib rc_context issue\r\n\r\nFixes #2914\r\n\r\n* Add some additional comments about this workaround", + "code": "def adjust_legend_subtitles(legend):\n \n # Legend title not in rcParams until 3.0\n font_size = plt.rcParams.get(\"legend.title_fontsize\", None)\n hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()\n for hpack in hpackers:\n draw_area, text_area = hpack.get_children()\n handles = draw_area.get_children()\n if not all(artist.get_visible() for artist in handles):\n draw_area.set_width(0)\n for text in 
text_area.get_children():\n if font_size is not None:\n text.set_size(font_size)\n\n", + "url": "https://github.com/mwaskom/seaborn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 138, + "n_words": 46, + "vocab_size": 34, + "complexity": 6, + "nloc": 11, + "token_counts": 100, + "n_ast_nodes": 165, + "n_identifiers": 22, + "d_id": 7477, + "documentation": { + "docstring": "\n Make invisible-handle \"subtitles\" entries look more like titles.\n\n Note: This function is not part of the public API and may be changed or removed.\n\n ", + "n_words": 24, + "vocab_size": 24, + "n_whitespaces": 34, + "language": "en" + } + }, + { + "id": 272002, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/engine/training_v1.py", + "file_name": "training_v1.py", + "fun_name": "_prepare_skip_target_masks", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def _prepare_skip_target_masks(self):\n \n return [l is None for l in self.loss_functions]\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 24, + "n_words": 10, + "vocab_size": 10, + "complexity": 2, + "nloc": 2, + "token_counts": 18, + "n_ast_nodes": 30, + "n_identifiers": 4, + "d_id": 80930, + "documentation": { + "docstring": "Boolean mask for whether the target in the output list should be skipped.\n\n If the loss function corresponding to a model output is None, then this\n output will be skipped during total loss calculation and feed targets\n preparation.\n\n Returns:\n A boolean list for whether the corresponding target in the output list\n should be skipped during loss calculation.\n ", + "n_words": 57, + "vocab_size": 36, + "n_whitespaces": 110, + "language": "en" + } + }, + { + "id": 281593, + "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", + "repo": "OpenBBTerminal", + "path": "gamestonk_terminal/terminal_helper.py", + "file_name": "terminal_helper.py", + "fun_name": "update_terminal", + "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test 
fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", + "code": "def update_terminal():\n \n poetry_hash = sha256sum(\"poetry.lock\")\n\n completed_process = subprocess.run(\"git pull\", shell=True, check=False) # nosec\n if completed_process.returncode != 0:\n return completed_process.returncode\n\n new_poetry_hash = sha256sum(\"poetry.lock\")\n\n if poetry_hash == new_poetry_hash:\n console.print(\"Great, seems like poetry hasn't been updated!\")\n return completed_process.returncode\n console.print(\n \"Seems like more modules have been added, grab a coke, this may take a while.\"\n )\n\n completed_process = subprocess.run(\n \"poetry install\", shell=True, check=False\n ) # nosec\n if completed_process.returncode != 0:\n return completed_process.returncode\n\n return 0\n\n", + "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 150, + "n_words": 70, + "vocab_size": 46, + "complexity": 4, + "nloc": 18, + "token_counts": 94, + "n_ast_nodes": 163, + "n_identifiers": 12, + "d_id": 83890, + "documentation": { + "docstring": "Updates the terminal by running git pull in the directory. 
Runs poetry install if needed", + "n_words": 15, + "vocab_size": 14, + "n_whitespaces": 15, + "language": "en" + } + }, + { + "id": 321448, + "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", + "repo": "qutebrowser", + "path": "tests/unit/utils/test_qtutils.py", + "file_name": "test_qtutils.py", + "fun_name": "test_serialize_post_error_mock", + "commit_message": "Run scripts/dev/rewrite_enums.py", + "code": "def test_serialize_post_error_mock(self, stream_mock):\n \n obj = QPoint()\n stream_mock.__lshift__.side_effect = lambda _other: self._set_status(\n stream_mock, QDataStream.Status.ReadCorruptData)\n\n with pytest.raises(OSError, match=\"The data stream has read corrupt \"\n \"data.\"):\n qtutils.serialize_stream(stream_mock, obj)\n\n assert stream_mock.__lshift__.called_once_with(obj)\n", + "url": "https://github.com/qutebrowser/qutebrowser.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 125, + "n_words": 27, + "vocab_size": 26, + "complexity": 1, + "nloc": 8, + "token_counts": 64, + "n_ast_nodes": 108, + "n_identifiers": 19, + "d_id": 117730, + "documentation": { + "docstring": "Test serialize_stream with an error while serializing.", + "n_words": 7, + "vocab_size": 7, + "n_whitespaces": 6, + "language": "en" + } + }, + { + "id": 163219, + "commit_id": "3977d7335f0792c012013e3b459a7950dfd31a7d", + "repo": "pandas", + "path": "pandas/core/indexes/base.py", + "file_name": "base.py", + "fun_name": "_validate_fill_value", + "commit_message": "REF: consolidate _validate_fill_logic in np_can_hold_element (#45216)", + "code": "def _validate_fill_value(self, value):\n \n dtype = self.dtype\n if isinstance(dtype, np.dtype) and dtype.kind not in [\"m\", \"M\"]:\n try:\n return np_can_hold_element(dtype, value)\n except ValueError as err:\n # re-raise as TypeError for consistency\n raise TypeError from err\n if not can_hold_element(self._values, value):\n raise TypeError\n return value\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 154, + "n_words": 41, + "vocab_size": 33, + "complexity": 5, + "nloc": 10, + "token_counts": 67, + "n_ast_nodes": 109, + "n_identifiers": 13, + "d_id": 39405, + "documentation": { + "docstring": "\n Check if the value can be inserted into our array without casting,\n and convert it to an appropriate native type if necessary.\n\n Raises\n ------\n TypeError\n If the value cannot be inserted into an array of this dtype.\n ", + "n_words": 37, + "vocab_size": 29, + "n_whitespaces": 91, + "language": "en" + } + }, + { + "id": 14004, + "commit_id": "5a0830cfb6bfa33dcffb38681f86efe5f6f0f97c", + "repo": "jina", + "path": "jina/serve/stream/helper.py", + "file_name": "helper.py", + "fun_name": "__anext__", + "commit_message": "refactor: avoid run in executor creating threads (#5518)", + "code": "async def __anext__(self):\n if isinstance(self.iterator, Iterator):\n \n\n if not self._iterate_sync_in_thread:", + "url": "https://github.com/jina-ai/jina.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 38, + "n_words": 9, + "vocab_size": 8, + "complexity": 7, + "nloc": 27, + "token_counts": 108, + "n_ast_nodes": 39, + "n_identifiers": 6, + "d_id": 2797, + "documentation": { + "docstring": "\n An `Iterator` indicates \"blocking\" code, which might block all tasks in the event loop.\n Hence we iterate in the default executor provided by 
asyncio.\n ", + "n_words": 24, + "vocab_size": 22, + "n_whitespaces": 58, + "language": "en" + } + }, + { + "id": 20914, + "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", + "repo": "pipenv", + "path": "pipenv/patched/notpip/_vendor/typing_extensions.py", + "file_name": "typing_extensions.py", + "fun_name": "__new__", + "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", + "code": "def __new__(cls, name, bases, ns, total=True):\n # Create new typed dict class object.\n # This method is called directly when TypedDict is subclassed,\n # or via _typeddict_new when TypedDict is instantiated. This way\n # TypedDict supports all three syntaxes described in its docstring.\n # Subclasses and instances of TypedDict return actual dictionaries\n # via _dict_new.\n ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new\n tp_dict = super().__new__(cls, name, (dict,), ns)\n\n annotations = {}\n own_annotations = ns.get('__annotations__', {})\n own_annotation_keys = set(own_annotations.keys())\n msg = \"TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type\"\n own_annotations = {\n n: typing._type_check(tp, msg) for n, tp in own_annotations.items()\n }\n required_keys = set()\n optional_keys = set()\n\n for base in bases:\n annotations.update(base.__dict__.get('__annotations__', {}))\n required_keys.update(base.__dict__.get('__required_keys__', ()))\n optional_keys.update(base.__dict__.get('__optional_keys__', ()))\n\n annotations.update(own_annotations)\n if total:\n required_keys.update(own_annotation_keys)\n else:\n optional_keys.update(own_annotation_keys)\n\n tp_dict.__annotations__ = annotations\n tp_dict.__required_keys__ = frozenset(required_keys)\n tp_dict.__optional_keys__ = frozenset(optional_keys)\n if not hasattr(tp_dict, '__total__'):\n tp_dict.__total__ = total\n return tp_dict\n\n __instancecheck__ = __subclasscheck__ = _check_fails\n\n TypedDict = _TypedDictMeta('TypedDict', (dict,), {})\n TypedDict.__module__ = __name__\n TypedDict.__doc__ = \\\n ", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "\"\"\"A simple typed name space. At runtimeequivalent to a plain dict.\n\n TypedDict creates a dictionary type that expects all ofto a plain dictinstances to have a certain set ofwith eachassociated with a value of a consistent type. This expectation\n is not checked at runtime but is only enforced by type checkers.\n Usage::a value of a consistent type. Thisat runtime butonly enforced by type", + "n_ast_errors": 9, + "ast_levels": 12, + "n_whitespaces": 565, + "n_words": 162, + "vocab_size": 116, + "complexity": 6, + "nloc": 27, + "token_counts": 221, + "n_ast_nodes": 495, + "n_identifiers": 80, + "d_id": 3614, + "documentation": { + "docstring": "A simple typed name space. 
At runtime it is equivalent to a plain dict.\n\n TypedDict creates a dictionary type that expects all of its\n instances to have a certain set of keys, with each key\n associated with a value of a consistent type. This expectation\n is not checked at runtime but is only enforced by type checkers.\n Usage::\n", + "n_words": 58, + "vocab_size": 46, + "n_whitespaces": 92, + "language": "en" + } + }, + { + "id": 263997, + "commit_id": "04984a040c2396127f234518f783cbed088408bb", + "repo": "pyinstaller", + "path": "PyInstaller/building/api.py", + "file_name": "api.py", + "fun_name": "_set_dependencies", + "commit_message": "building: move filename processing of EXTENSION entries to analysis stage\n\nMove filename processing of EXTENSION TOC entries (i.e., converting\nthe module name to file path and adding the suffix) from the\nbuild stage (i.e., `assemble` in `PKG`, `COLLECT`, and `BUNDLE`)\ninto analysis stage.\n\nThis ensures that during the build stage, the EXTENSION entries\nin the TOC are already full filenames, same as other file-based\nentries (DATA, BINARY, etc.).\n\nThis in turn means that the `add_suffix_to_extension` helper does\nnot need to worry about DEPENDENCY entries anymore, and can\nprocess only EXTENSION ones, as implied by its name.\n\nEarly conversion of EXTENSION module names to file names also\nprevents duplication when the same file is collected as both\nan EXTENSION and some other type, for example DATA:\n```\n('torch._C',\n '...\\\\site-packages\\\\torch\\\\_C.cp39-win_amd64.pyd',\n 'EXTENSION'),\n('torch\\\\_C.cp39-win_amd64.pyd',\n '...\\\\site-pakages\\\\torch\\\\_C.cp39-win_amd64.pyd',\n 'DATA'),\n```\nPrior to this commit, the entries were considered different\nfrom the `TOC` perspective, but led to duplication in onefile\nbuild's PKG once extension's name was changed to the file name\n(whereas in onedir build, the first entry was overwritten by\nthe second).", + "code": "def _set_dependencies(self, analysis, path):\n \n for toc in (analysis.binaries, analysis.datas):\n for i, tpl in enumerate(toc):\n if not tpl[1] in self._dependencies:\n logger.debug(\"Adding dependency %s located in %s\", tpl[1], path)\n self._dependencies[tpl[1]] = path\n else:\n dep_path = self._get_relative_path(path, self._dependencies[tpl[1]])\n # Ignore references that point to the origin package. 
This can happen if the same resource is listed\n # multiple times in TOCs (e.g., once as binary and once as data).\n if dep_path.endswith(path):\n logger.debug(\n \"Ignoring self-reference of %s for %s, located in %s - duplicated TOC entry?\", tpl[1], path,\n dep_path\n )\n # Clear the entry as it is a duplicate.\n toc[i] = (None, None, None)\n continue\n logger.debug(\"Referencing %s to be a dependency for %s, located in %s\", tpl[1], path, dep_path)\n # Determine the path relative to dep_path (i.e, within the target directory) from the 'name'\n # component of the TOC tuple.\n rel_path = os.path.dirname(tpl[0])\n # Take filename from 'path' (second component of TOC tuple); this way, we don't need to worry about\n # suffix of extensions.\n filename = os.path.basename(tpl[1])\n # Construct the full file path relative to dep_path...\n filename = os.path.join(rel_path, filename)\n # ...and use it in new DEPENDENCY entry\n analysis.dependencies.append((\":\".join((dep_path, filename)), tpl[1], \"DEPENDENCY\"))\n toc[i] = (None, None, None)\n # Clean the list\n toc[:] = [tpl for tpl in toc if tpl != (None, None, None)]\n\n # TODO: use pathlib.Path.relative_to() instead.", + "url": "https://github.com/pyinstaller/pyinstaller.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 18, + "n_whitespaces": 807, + "n_words": 216, + "vocab_size": 129, + "complexity": 7, + "nloc": 22, + "token_counts": 237, + "n_ast_nodes": 365, + "n_identifiers": 24, + "d_id": 77548, + "documentation": { + "docstring": "\n Synchronize the Analysis result with the needed dependencies.\n ", + "n_words": 8, + "vocab_size": 7, + "n_whitespaces": 23, + "language": "en" + } + }, + { + "id": 189478, + "commit_id": "902e7eb4f0147b5882a613b67467e38a1d47f01e", + "repo": "manim", + "path": "manim/mobject/svg/tex_mobject.py", + "file_name": "tex_mobject.py", + "fun_name": "break_up_by_substrings", + "commit_message": "Hide more private methods from the docs. 
(#2468)\n\n* hide privs from text_mobject.py\r\n\r\n* hide privs from tex_mobject.py\r\n\r\n* hide privs from code_mobject.py\r\n\r\n* hide privs from svg_mobject.py\r\n\r\n* remove SVGPath and utils from __init__.py\r\n\r\n* don't import string_to_numbers\r\n\r\n* hide privs from geometry.py\r\n\r\n* hide privs from matrix.py\r\n\r\n* hide privs from numbers.py\r\n\r\n* hide privs from three_dimensions.py\r\n\r\n* forgot underscore under set_stroke_width_from_length\r\n\r\n* there were more i missed\r\n\r\n* unhidea method that was used in docs\r\n\r\n* forgot other text2hash\r\n\r\n* remove svg_path from docs", + "code": "def _break_up_by_substrings(self):\n \n new_submobjects = []\n curr_index = 0\n for tex_string in self.tex_strings:\n sub_tex_mob = SingleStringMathTex(\n tex_string,\n tex_environment=self.tex_environment,\n tex_template=self.tex_template,\n )\n num_submobs = len(sub_tex_mob.submobjects)\n new_index = (\n curr_index + num_submobs + len(\"\".join(self.arg_separator.split()))\n )\n if num_submobs == 0:\n # For cases like empty tex_strings, we want the corresponding\n # part of the whole MathTex to be a VectorizedPoint\n # positioned in the right part of the MathTex\n sub_tex_mob.submobjects = [VectorizedPoint()]\n last_submob_index = min(curr_index, len(self.submobjects) - 1)\n sub_tex_mob.move_to(self.submobjects[last_submob_index], RIGHT)\n else:\n sub_tex_mob.submobjects = self.submobjects[curr_index:new_index]\n new_submobjects.append(sub_tex_mob)\n curr_index = new_index\n self.submobjects = new_submobjects\n return self\n", + "url": "https://github.com/ManimCommunity/manim.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 394, + "n_words": 88, + "vocab_size": 61, + "complexity": 3, + "nloc": 23, + "token_counts": 142, + "n_ast_nodes": 230, + "n_identifiers": 23, + "d_id": 46082, + "documentation": { + "docstring": "\n Reorganize existing submobjects one layer\n deeper based on the structure of tex_strings (as a list\n of tex_strings)\n ", + "n_words": 17, + "vocab_size": 16, + "n_whitespaces": 46, + "language": "en" + } + }, + { + "id": 63322, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", + "file_name": "pyparsing.py", + "fun_name": "delimitedList", + "commit_message": "upd; format", + "code": "def delimitedList(expr, delim=\",\", combine=False):\n \n dlName = _ustr(expr) + \" [\" + _ustr(delim) + \" \" + _ustr(expr) + \"]...\"\n if combine:\n return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName)\n else:\n return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName)\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 60, + "n_words": 34, + "vocab_size": 21, + "complexity": 2, + "nloc": 6, + "token_counts": 77, + "n_ast_nodes": 132, + "n_identifiers": 10, + "d_id": 13253, + "documentation": { + "docstring": "Helper to define a delimited list of expressions - the delimiter\n defaults to ','. By default, the list elements and delimiters can\n have intervening whitespace, and comments, but this can be\n overridden by passing ``combine=True`` in the constructor. 
If\n ``combine`` is set to ``True``, the matching tokens are\n returned as a single token string, with the delimiters included;\n otherwise, the matching tokens are returned as a list of tokens,\n with the delimiters suppressed.\n\n Example::\n\n delimitedList(Word(alphas)).parseString(\"aa,bb,cc\") # -> ['aa', 'bb', 'cc']\n delimitedList(Word(hexnums), delim=':', combine=True).parseString(\"AA:BB:CC:DD:EE\") # -> ['AA:BB:CC:DD:EE']\n ", + "n_words": 86, + "vocab_size": 61, + "n_whitespaces": 127, + "language": "en" + } + }, + { + "id": 218095, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/importlib/_bootstrap.py", + "file_name": "_bootstrap.py", + "fun_name": "_gcd_import", + "commit_message": "add python 3.10.4 for windows", + "code": "def _gcd_import(name, package=None, level=0):\n \n _sanity_check(name, package, level)\n if level > 0:\n name = _resolve_name(name, package, level)\n return _find_and_load(name, _gcd_import)\n\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 38, + "n_words": 19, + "vocab_size": 17, + "complexity": 2, + "nloc": 5, + "token_counts": 44, + "n_ast_nodes": 66, + "n_identifiers": 7, + "d_id": 55125, + "documentation": { + "docstring": "Import and return the module based on its name, the package the call is\n being made from, and the level adjustment.\n\n This function represents the greatest common denominator of functionality\n between import_module and __import__. This includes setting __package__ if\n the loader did not.\n\n ", + "n_words": 43, + "vocab_size": 35, + "n_whitespaces": 58, + "language": "en" + } + }, + { + "id": 286538, + "commit_id": "8e9e6bd57f4bc5d57ccedfacccda6342d5881266", + "repo": "OpenBBTerminal", + "path": "openbb_terminal/portfolio/portfolio_model.py", + "file_name": "portfolio_model.py", + "fun_name": "get_transactions", + "commit_message": "Incorporate portfolio class into SDK (#3401)\n\n* create functions to interact with portfolio\r\n\r\n* fix some docstrings\r\n\r\n* view docstrings\r\n\r\n* make portfolio loading available in sdk\r\n\r\n* reorder some methods\r\n\r\n* fix bug\r\n\r\n* update controller\r\n\r\n* update website\r\n\r\n* remove import\r\n\r\n* change input name\r\n\r\n* regenerate website\r\n\r\n* change portfolio arg name\r\n\r\n* fix metrics bugs\r\n\r\n* fix report\r\n\r\n* refactor assets alloc\r\n\r\n* refactor assets sectors alloc\r\n\r\n* remove unecessary attributes\r\n\r\n* refactor allocaasset sector\r\n\r\n* reorganize class\r\n\r\n* first refactor alloc\r\n\r\n* refactor portfolio alloc\r\n\r\n* black\r\n\r\n* fix alloc bug\r\n\r\n* regenerate sdk website\r\n\r\n* fix alloc bugs\r\n\r\n* forgot this exception\r\n\r\n* some refactor on portfolio alloc country region\r\n\r\n* fix some allocation bugs\r\n\r\n* add examples\r\n\r\n* regenerate website\r\n\r\nCo-authored-by: James Maslek ", + "code": "def get_transactions(self):\n \n\n df = self.__transactions[\n [\n \"Date\",\n \"Type\",\n \"Ticker\",\n \"Side\",\n \"Price\",\n \"Quantity\",\n \"Fees\",\n \"Investment\",\n \"Currency\",\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n ]\n ]\n df = df.replace(np.nan, \"-\")\n df[\"Date\"] = df[\"Date\"].dt.strftime(\"%Y-%m-%d\")\n df.sort_values(by=\"Date\", ascending=False, inplace=True)\n return df\n", + "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, 
+ "ast_levels": 11, + "n_whitespaces": 299, + "n_words": 33, + "vocab_size": 28, + "complexity": 1, + "nloc": 22, + "token_counts": 87, + "n_ast_nodes": 157, + "n_identifiers": 13, + "d_id": 85861, + "documentation": { + "docstring": "Get formatted transactions\n\n Returns\n -------\n pd.DataFrame: formatted transactions\n ", + "n_words": 8, + "vocab_size": 6, + "n_whitespaces": 40, + "language": "en" + } + }, + { + "id": 23379, + "commit_id": "6e607a0fa1cefbf0388dac86c84debf4781cec48", + "repo": "PaddleOCR", + "path": "ppocr/modeling/backbones/rec_efficientb3_pren.py", + "file_name": "rec_efficientb3_pren.py", + "fun_name": "get_global_params", + "commit_message": "[Feature] Add PREN Scene Text Recognition Model(Accepted in CVPR2021) (#5563)\n\n* [Feature] add PREN scene text recognition model\r\n\r\n* [Patch] Optimize yml File\r\n\r\n* [Patch] Save Label/Pred Preprocess Time Cost\r\n\r\n* [BugFix] Modify Shape Conversion to Fit for Inference Model Exportion\r\n\r\n* [Patch] ?\r\n\r\n* [Patch] ?\r\n\r\n* 啥情况...", + "code": "def get_global_params():\n \n GlobalParams = namedtuple('GlobalParams', [\n 'drop_connect_rate', 'width_coefficient', 'depth_coefficient',\n 'depth_divisor', 'image_size'\n ])\n global_params = GlobalParams(\n drop_connect_rate=0.3,\n width_coefficient=1.2,\n depth_coefficient=1.4,\n depth_divisor=8,\n image_size=64)\n return global_params\n", + "url": "https://github.com/PaddlePaddle/PaddleOCR.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 134, + "n_words": 22, + "vocab_size": 20, + "complexity": 1, + "nloc": 12, + "token_counts": 55, + "n_ast_nodes": 83, + "n_identifiers": 9, + "d_id": 4585, + "documentation": { + "docstring": "\n The fllowing are efficientnetb3's arch superparams, but to fit for scene \n text recognition task, the resolution(image_size) here is changed \n from 300 to 64.\n ", + "n_words": 23, + "vocab_size": 22, + "n_whitespaces": 54, + "language": "en" + } + }, + { + "id": 265553, + "commit_id": "8b1a462a6070cb6054af8bb59589c9a2e785afc2", + "repo": "netbox", + "path": "netbox/netbox/views/generic/object_views.py", + "file_name": "object_views.py", + "fun_name": "get_extra_addanother_params", + "commit_message": "#10094 changes from code review", + "code": "def get_extra_addanother_params(self, request, params):\n \n return {}\n\n #\n # Request handlers\n #\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 6, + "n_whitespaces": 34, + "n_words": 11, + "vocab_size": 9, + "complexity": 1, + "nloc": 2, + "token_counts": 13, + "n_ast_nodes": 25, + "n_identifiers": 4, + "d_id": 78139, + "documentation": { + "docstring": "\n Return a dictionary of extra parameters to use on the Add Another button.\n ", + "n_words": 13, + "vocab_size": 13, + "n_whitespaces": 28, + "language": "en" + } + }, + { + "id": 60723, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_internal/index/collector.py", + "file_name": "collector.py", + "fun_name": "_ensure_html_header", + "commit_message": "upd; format", + "code": "def _ensure_html_header(response):\n # type: (Response) -> None\n \n content_type = response.headers.get(\"Content-Type\", \"\")\n if not content_type.lower().startswith(\"text/html\"):\n raise _NotHTML(content_type, response.request.method)\n\n", + "url": "https://github.com/jindongwang/transferlearning.git", + 
"language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 36, + "n_words": 17, + "vocab_size": 17, + "complexity": 2, + "nloc": 4, + "token_counts": 42, + "n_ast_nodes": 76, + "n_identifiers": 10, + "d_id": 12262, + "documentation": { + "docstring": "Check the Content-Type header to ensure the response contains HTML.\n\n Raises `_NotHTML` if the content type is not text/html.\n ", + "n_words": 19, + "vocab_size": 17, + "n_whitespaces": 25, + "language": "en" + } + }, + { + "id": 247791, + "commit_id": "9d21ecf7ceab55bc19c4457b8b07401b0b1623a7", + "repo": "synapse", + "path": "tests/storage/test_id_generators.py", + "file_name": "test_id_generators.py", + "fun_name": "test_get_next_txn", + "commit_message": "Add type hints to tests files. (#12256)", + "code": "def test_get_next_txn(self) -> None:\n \n\n # Prefill table with 7 rows written by 'master'\n self._insert_rows(\"master\", 7)\n\n id_gen = self._create_id_generator()\n\n self.assertEqual(id_gen.get_positions(), {\"master\": 7})\n self.assertEqual(id_gen.get_current_token_for_writer(\"master\"), 7)\n\n # Try allocating a new ID gen and check that we only see position\n # advanced after we leave the context manager.\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 101, + "n_words": 45, + "vocab_size": 41, + "complexity": 1, + "nloc": 10, + "token_counts": 98, + "n_ast_nodes": 94, + "n_identifiers": 8, + "d_id": 71924, + "documentation": { + "docstring": "Test that the `get_next_txn` function works correctly.", + "n_words": 7, + "vocab_size": 7, + "n_whitespaces": 6, + "language": "en" + } + }, + { + "id": 156063, + "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", + "repo": "dask", + "path": "dask/array/slicing.py", + "file_name": "slicing.py", + "fun_name": "sanitize_index", + "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", + "code": "def sanitize_index(ind):\n \n from dask.array.utils import asanyarray_safe\n\n if ind is None:\n return None\n elif isinstance(ind, slice):\n return slice(\n _sanitize_index_element(ind.start),\n _sanitize_index_element(ind.stop),\n _sanitize_index_element(ind.step),\n )\n elif isinstance(ind, Number):\n return _sanitize_index_element(ind)\n elif is_dask_collection(ind):\n return ind\n index_array = asanyarray_safe(ind, like=ind)\n if index_array.dtype == bool:\n nonzero = np.nonzero(index_array)\n if len(nonzero) == 1:\n # If a 1-element tuple, unwrap the element\n nonzero = nonzero[0]\n if is_arraylike(nonzero):\n return nonzero\n else:\n return np.asanyarray(nonzero)\n elif np.issubdtype(index_array.dtype, np.integer):\n return index_array\n elif np.issubdtype(index_array.dtype, np.floating):\n int_index = index_array.astype(np.intp)\n if np.allclose(index_array, int_index):\n return int_index\n else:\n check_int = np.isclose(index_array, int_index)\n first_err = index_array.ravel()[np.flatnonzero(~check_int)[0]]\n raise IndexError(\"Bad index. 
Must be integer-like: %s\" % first_err)\n else:\n raise TypeError(\"Invalid index type\", type(ind), ind)\n\n", + "url": "https://github.com/dask/dask.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 356, + "n_words": 103, + "vocab_size": 72, + "complexity": 11, + "nloc": 35, + "token_counts": 235, + "n_ast_nodes": 371, + "n_identifiers": 38, + "d_id": 36528, + "documentation": { + "docstring": "Sanitize the elements for indexing along one axis\n\n >>> sanitize_index([2, 3, 5])\n array([2, 3, 5])\n >>> sanitize_index([True, False, True, False])\n array([0, 2])\n >>> sanitize_index(np.array([1, 2, 3]))\n array([1, 2, 3])\n >>> sanitize_index(np.array([False, True, True]))\n array([1, 2])\n >>> type(sanitize_index(np.int32(0)))\n \n >>> sanitize_index(1.0)\n 1\n >>> sanitize_index(0.5)\n Traceback (most recent call last):\n ...\n IndexError: Bad index. Must be integer-like: 0.5\n ", + "n_words": 57, + "vocab_size": 45, + "n_whitespaces": 109, + "language": "en" + } + }, + { + "id": 85651, + "commit_id": "c48fda09e252018a4d2b831bb84e1c68a739c085", + "repo": "sentry", + "path": "tests/sentry/sentry_metrics/test_multiprocess_steps.py", + "file_name": "test_multiprocess_steps.py", + "fun_name": "test_process_messages_cardinality_limited", + "commit_message": "feat(metrics): Add cardinality limiter to indexer [sns-1651] (#38428)\n\nReopen of https://github.com/getsentry/sentry/pull/38302 to avoid\r\nnotification spam\r\n\r\nSee #38257 and\r\nhttps://www.notion.so/sentry/Metrics-Dimensionality-Limiting-df010a6a6d4e467ca3c5c19230db862b#4966fb9c07fc4394b720ad161c99a096.\r\nThis is just the glue code and configuration options for using the\r\ncardinality limiter in the indexer. The actual implementation is TBD.\r\nThis is safe to merge because the stub implementation does not actually\r\nlimit anything at all, so it should be fast enough to do synchronously\r\nfor now\r\n\r\n## rollout plan\r\n\r\n- [x] https://github.com/getsentry/sentry/pull/38446\r\n- [x] set options to nothing in prod\r\n- [ ] merge + deploy this PR\r\n- [ ] check prod metrics: redis should not be used\r\n- [ ] https://github.com/getsentry/sentry/pull/38445\r\n- [ ] check prod metrics: redis should still not be used\r\n- [ ] run qe tests?\r\n- [ ] get a redis cluster and configure it\r\n- [ ] run use_quota on a separate thread\r\n- [ ] set a quota\r\n- [ ] get rid of writes limiter?\r\n- [ ] stop indexing tag values\r\n\r\nCo-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>\r\nCo-authored-by: Nikhar Saxena ", + "code": "def test_process_messages_cardinality_limited(caplog, settings, monkeypatch) -> None:\n \n settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0\n", + "url": "https://github.com/getsentry/sentry.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 7, + "n_whitespaces": 15, + "n_words": 9, + "vocab_size": 9, + "complexity": 2, + "nloc": 28, + "token_counts": 170, + "n_ast_nodes": 28, + "n_identifiers": 5, + "d_id": 18024, + "documentation": { + "docstring": "\n Test that the message processor correctly calls the cardinality limiter.\n ", + "n_words": 10, + "vocab_size": 9, + "n_whitespaces": 17, + "language": "en" + } + }, + { + "id": 46825, + "commit_id": "34154803ac73d62d3e969e480405df3073032622", + "repo": "airflow", + "path": "airflow/models/taskmixin.py", + "file_name": "taskmixin.py", + "fun_name": "dag_id", + "commit_message": "Show tasks in grid view based on topological sort. 
(#22741)\n\nThis takes the existing topological sort that existed on a DAG and moves\r\nit down to TaskGroup.\r\n\r\nIn order to do this (and not have duplicated sort) the existing sort on\r\nDAG is re-implemented on top of the new method.\r\n\r\nThis also surfaced a tiny bug in deserialize_task_group where the\r\nSerializedTaskGroup did not have `dag` set -- it didn't cause any\r\nproblems until now but was needed to call `upstream_list` on a\r\nSerializedTaskGroup object.", + "code": "def dag_id(self) -> str:\n \n if self.dag:\n return self.dag.dag_id\n return \"_in_memory_dag_\"\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 42, + "n_words": 10, + "vocab_size": 9, + "complexity": 2, + "nloc": 5, + "token_counts": 21, + "n_ast_nodes": 38, + "n_identifiers": 4, + "d_id": 9007, + "documentation": { + "docstring": "Returns dag id if it has one or an adhoc/meaningless ID", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 272922, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/layers/preprocessing/discretization.py", + "file_name": "discretization.py", + "fun_name": "compress", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def compress(summary, epsilon):\n \n # TODO(b/184863356): remove the numpy escape hatch here.\n return tf.numpy_function(\n lambda s: _compress_summary_numpy(s, epsilon), [summary], tf.float32\n )\n\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 39, + "n_words": 20, + "vocab_size": 20, + "complexity": 1, + "nloc": 4, + "token_counts": 31, + "n_ast_nodes": 49, + "n_identifiers": 8, + "d_id": 81077, + "documentation": { + "docstring": "Compress a summary to within `epsilon` accuracy.\n\n The compression step is needed to keep the summary sizes small after merging,\n and also used to return the final target boundaries. It finds the new bins\n based on interpolating cumulative weight percentages from the large summary.\n Taking the difference of the cumulative weights from the previous bin's\n cumulative weight will give the new weight for that bin.\n\n Args:\n summary: 2D `np.ndarray` summary to be compressed.\n epsilon: A `'float32'` that determines the approxmiate desired precision.\n\n Returns:\n A 2D `np.ndarray` that is a compressed summary. 
First column is the\n interpolated partition values, the second is the weights (counts).\n ", + "n_words": 104, + "vocab_size": 71, + "n_whitespaces": 156, + "language": "en" + } + }, + { + "id": 269278, + "commit_id": "2d1086447a25d281f9428832d046c473d80ad761", + "repo": "keras", + "path": "keras/applications/convnext.py", + "file_name": "convnext.py", + "fun_name": "Head", + "commit_message": "Corrected preprocess_input docstring in regnet.py and convnext.py", + "code": "def Head(num_classes=1000, name=None):\n \n if name is None:\n name = str(backend.get_uid(\"head\"))\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 23, + "n_words": 10, + "vocab_size": 9, + "complexity": 2, + "nloc": 5, + "token_counts": 32, + "n_ast_nodes": 49, + "n_identifiers": 6, + "d_id": 79999, + "documentation": { + "docstring": "Implementation of classification head of RegNet.\n\n Args:\n num_classes: number of classes for Dense layer\n name: name prefix\n\n Returns:\n Classification head function.\n ", + "n_words": 21, + "vocab_size": 18, + "n_whitespaces": 33, + "language": "en" + } + }, + { + "id": 337462, + "commit_id": "e5c17f36a8b5bf8b9478d416c4a80841a353fb19", + "repo": "accelerate", + "path": "src/accelerate/test_utils/testing.py", + "file_name": "testing.py", + "fun_name": "require_tpu", + "commit_message": "Clean up tests + fix import (#330)", + "code": "def require_tpu(test_case):\n \n return unittest.skipUnless(is_tpu_available(), \"test requires TPU\")(test_case)\n\n", + "url": "https://github.com/huggingface/accelerate.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 13, + "n_words": 7, + "vocab_size": 7, + "complexity": 1, + "nloc": 2, + "token_counts": 20, + "n_ast_nodes": 37, + "n_identifiers": 5, + "d_id": 121064, + "documentation": { + "docstring": "\n Decorator marking a test that requires TPUs. 
These tests are skipped when there are no TPUs available.\n ", + "n_words": 17, + "vocab_size": 16, + "n_whitespaces": 24, + "language": "en" + } + }, + { + "id": 259201, + "commit_id": "7f0006c8aad1a09621ad19c3db19c3ff0555a183", + "repo": "scikit-learn", + "path": "sklearn/preprocessing/_encoders.py", + "file_name": "_encoders.py", + "fun_name": "_identify_infrequent", + "commit_message": "ENH Adds infrequent categories to OneHotEncoder (#16018)\n\n* ENH Completely adds infrequent categories\r\n\r\n* STY Linting\r\n\r\n* STY Linting\r\n\r\n* DOC Improves wording\r\n\r\n* DOC Lint\r\n\r\n* BUG Fixes\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* DOC Uses math to description float min_frequency\r\n\r\n* DOC Adds comment regarding drop\r\n\r\n* BUG Fixes method name\r\n\r\n* DOC Clearer docstring\r\n\r\n* TST Adds more tests\r\n\r\n* FIX Fixes mege\r\n\r\n* CLN More pythonic\r\n\r\n* CLN Address comments\r\n\r\n* STY Flake8\r\n\r\n* CLN Address comments\r\n\r\n* DOC Fix\r\n\r\n* MRG\r\n\r\n* WIP\r\n\r\n* ENH Address comments\r\n\r\n* STY Fix\r\n\r\n* ENH Use functiion call instead of property\r\n\r\n* ENH Adds counts feature\r\n\r\n* CLN Rename variables\r\n\r\n* DOC More details\r\n\r\n* CLN Remove unneeded line\r\n\r\n* CLN Less lines is less complicated\r\n\r\n* CLN Less diffs\r\n\r\n* CLN Improves readiabilty\r\n\r\n* BUG Fix\r\n\r\n* CLN Address comments\r\n\r\n* TST Fix\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* CLN Move docstring to userguide\r\n\r\n* DOC Better wrapping\r\n\r\n* TST Adds test to handle_unknown='error'\r\n\r\n* ENH Spelling error in docstring\r\n\r\n* BUG Fixes counter with nan values\r\n\r\n* BUG Removes unneeded test\r\n\r\n* BUG Fixes issue\r\n\r\n* ENH Sync with main\r\n\r\n* DOC Correct settings\r\n\r\n* DOC Adds docstring\r\n\r\n* DOC Immprove user guide\r\n\r\n* DOC Move to 1.0\r\n\r\n* DOC Update docs\r\n\r\n* TST Remove test\r\n\r\n* DOC Update docstring\r\n\r\n* STY Linting\r\n\r\n* DOC Address comments\r\n\r\n* ENH Neater code\r\n\r\n* DOC Update explaination for auto\r\n\r\n* Update sklearn/preprocessing/_encoders.py\r\n\r\nCo-authored-by: Roman Yurchak \r\n\r\n* TST Uses docstring instead of comments\r\n\r\n* TST Remove call to fit\r\n\r\n* TST Spelling error\r\n\r\n* ENH Adds support for drop + infrequent categories\r\n\r\n* ENH Adds infrequent_if_exist option\r\n\r\n* DOC Address comments for user guide\r\n\r\n* DOC Address comments for whats_new\r\n\r\n* DOC Update docstring based on comments\r\n\r\n* CLN Update test with suggestions\r\n\r\n* ENH Adds computed property infrequent_categories_\r\n\r\n* DOC Adds where the infrequent column is located\r\n\r\n* TST Adds more test for infrequent_categories_\r\n\r\n* DOC Adds docstring for _compute_drop_idx\r\n\r\n* CLN Moves _convert_to_infrequent_idx into its own method\r\n\r\n* TST Increases test coverage\r\n\r\n* TST Adds failing test\r\n\r\n* CLN Careful consideration of dropped and inverse_transform\r\n\r\n* STY Linting\r\n\r\n* DOC Adds docstrinb about dropping infrequent\r\n\r\n* DOC Uses only\r\n\r\n* DOC Numpydoc\r\n\r\n* TST Includes test for get_feature_names_out\r\n\r\n* DOC Move whats new\r\n\r\n* DOC Address docstring comments\r\n\r\n* DOC Docstring changes\r\n\r\n* TST Better comments\r\n\r\n* TST Adds check for handle_unknown='ignore' for infrequent\r\n\r\n* CLN Make _infrequent_indices private\r\n\r\n* CLN Change min_frequency default to None\r\n\r\n* DOC Adds comments\r\n\r\n* ENH adds support for max_categories=1\r\n\r\n* ENH Describe lexicon 
ordering for ties\r\n\r\n* DOC Better docstring\r\n\r\n* STY Fix\r\n\r\n* CLN Error when explicity dropping an infrequent category\r\n\r\n* STY Grammar\r\n\r\nCo-authored-by: Joel Nothman \r\nCo-authored-by: Roman Yurchak \r\nCo-authored-by: Guillaume Lemaitre ", + "code": "def _identify_infrequent(self, category_count, n_samples, col_idx):\n \n if isinstance(self.min_frequency, numbers.Integral):\n infrequent_mask = category_count < self.min_frequency\n elif isinstance(self.min_frequency, numbers.Real):\n min_frequency_abs = n_samples * self.min_frequency\n infrequent_mask = category_count < min_frequency_abs\n else:\n infrequent_mask = np.zeros(category_count.shape[0], dtype=bool)\n\n n_current_features = category_count.size - infrequent_mask.sum() + 1\n if self.max_categories is not None and self.max_categories < n_current_features:\n # stable sort to preserve original count order\n smallest_levels = np.argsort(category_count, kind=\"mergesort\")[\n : -self.max_categories + 1\n ]\n infrequent_mask[smallest_levels] = True\n\n output = np.flatnonzero(infrequent_mask)\n return output if output.size > 0 else None\n", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 237, + "n_words": 78, + "vocab_size": 56, + "complexity": 6, + "nloc": 16, + "token_counts": 146, + "n_ast_nodes": 227, + "n_identifiers": 26, + "d_id": 75639, + "documentation": { + "docstring": "Compute the infrequent indices.\n\n Parameters\n ----------\n category_count : ndarray of shape (n_cardinality,)\n Category counts.\n\n n_samples : int\n Number of samples.\n\n col_idx : int\n Index of the current category. Only used for the error message.\n\n Returns\n -------\n output : ndarray of shape (n_infrequent_categories,) or None\n If there are infrequent categories, indices of infrequent\n categories. 
Otherwise None.\n ", + "n_words": 55, + "vocab_size": 41, + "n_whitespaces": 173, + "language": "en" + } + }, + { + "id": 143428, + "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", + "repo": "ray", + "path": "rllib/env/wrappers/tests/test_unity3d_env.py", + "file_name": "test_unity3d_env.py", + "fun_name": "test_port_editor", + "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", + "code": "def test_port_editor(self, mock_unity3d):\n \n\n _ = Unity3DEnv(port=None)\n args, kwargs = mock_unity3d.call_args\n mock_unity3d.assert_called_once()\n self.assertEqual(5004, kwargs.get(\"base_port\"))\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 48, + "n_words": 13, + "vocab_size": 12, + "complexity": 1, + "nloc": 5, + "token_counts": 41, + "n_ast_nodes": 70, + "n_identifiers": 12, + "d_id": 32988, + "documentation": { + "docstring": "Test if the environment uses the editor port\n when no environment file is provided", + "n_words": 14, + "vocab_size": 12, + "n_whitespaces": 20, + "language": "en" + } + }, + { + "id": 72986, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/contrib/forms/forms.py", + "file_name": "forms.py", + "fun_name": "get_create_field_function", + "commit_message": "Reformat with black", + "code": "def get_create_field_function(self, type):\n \n create_field_function = getattr(self, \"create_%s_field\" % type, None)\n if create_field_function:\n return create_field_function\n else:\n import inspect\n\n method_list = [\n f[0]\n for f in inspect.getmembers(self.__class__, inspect.isfunction)\n if f[0].startswith(\"create_\") and f[0].endswith(\"_field\")\n ]\n raise AttributeError(\n \"Could not find function matching format \\\n create__field for type: \"\n + type,\n \"Must be one of: \" + \", \".join(method_list),\n )\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 254, + "n_words": 55, + "vocab_size": 48, + "complexity": 5, + "nloc": 17, + "token_counts": 89, + "n_ast_nodes": 149, + "n_identifiers": 15, + "d_id": 15920, + "documentation": { + "docstring": "\n Takes string of field type and returns a Django Form Field Instance.\n Assumes form field creation functions are in the format:\n 'create_fieldtype_field'\n ", + "n_words": 22, + "vocab_size": 21, + "n_whitespaces": 51, + "language": "en" + } + }, + { + "id": 75104, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/images/rich_text/contentstate.py", + "file_name": "contentstate.py", + "fun_name": "image_entity", + "commit_message": "Reformat with black", + "code": "def image_entity(props):\n \n return DOM.create_element(\n \"embed\",\n {\n \"embedtype\": \"image\",\n \"format\": props.get(\"format\"),\n \"id\": props.get(\"id\"),\n \"alt\": props.get(\"alt\"),\n },\n )\n\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 90, + "n_words": 16, + "vocab_size": 16, + "complexity": 1, + "nloc": 10, + "token_counts": 48, + "n_ast_nodes": 91, + "n_identifiers": 5, + "d_id": 16356, + "documentation": { + "docstring": "\n Helper to construct elements of the form\n \n when converting from contentstate data\n ", + "n_words": 18, 
+ "vocab_size": 18, + "n_whitespaces": 31, + "language": "en" + } + }, + { + "id": 196419, + "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", + "repo": "sympy", + "path": "sympy/solvers/bivariate.py", + "file_name": "bivariate.py", + "fun_name": "_linab", + "commit_message": "Moved imports to higher level", + "code": "def _linab(arg, symbol):\n \n arg = factor_terms(arg.expand())\n ind, dep = arg.as_independent(symbol)\n if arg.is_Mul and dep.is_Add:\n a, b, x = _linab(dep, symbol)\n return ind*a, ind*b, x\n if not arg.is_Add:\n b = 0\n a, x = ind, dep\n else:\n b = ind\n a, x = separatevars(dep).as_independent(symbol, as_Add=False)\n if x.could_extract_minus_sign():\n a = -a\n x = -x\n return a, b, x\n\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 136, + "n_words": 56, + "vocab_size": 33, + "complexity": 5, + "nloc": 16, + "token_counts": 118, + "n_ast_nodes": 188, + "n_identifiers": 16, + "d_id": 47919, + "documentation": { + "docstring": "Return ``a, b, X`` assuming ``arg`` can be written as ``a*X + b``\n where ``X`` is a symbol-dependent factor and ``a`` and ``b`` are\n independent of ``symbol``.\n\n Examples\n ========\n\n >>> from sympy.solvers.bivariate import _linab\n >>> from sympy.abc import x, y\n >>> from sympy import exp, S\n >>> _linab(S(2), x)\n (2, 0, 1)\n >>> _linab(2*x, x)\n (2, 0, x)\n >>> _linab(y + y*x + 2*x, x)\n (y + 2, y, x)\n >>> _linab(3 + 2*exp(x), x)\n (2, 3, exp(x))\n ", + "n_words": 78, + "vocab_size": 55, + "n_whitespaces": 126, + "language": "en" + } + }, + { + "id": 267520, + "commit_id": "34f8168afc1d7047c47adec3730c591a58f4f899", + "repo": "ansible", + "path": "lib/ansible/cli/console.py", + "file_name": "console.py", + "fun_name": "__getattr__", + "commit_message": "ansible-console fixes (#78064)\n\n* list collection task actions too\r\n* dynamically add execute/help functions when module is found\r\n* handle redirection and short names", + "code": "def __getattr__(self, name):\n \n attr = None\n\n if name.startswith('do_'):\n module = name.replace('do_', '')\n if module_loader.find_plugin(module):\n setattr(self, name, lambda arg, module=module: self.default(module + ' ' + arg))\n attr = object.__getattr__(self, name)\n elif name.startswith('help_'):\n module = name.replace('help_', '')\n if module_loader.find_plugin(module):\n setattr(self, name, lambda module=module: self.helpdefault(module))\n attr = object.__getattr__(self, name)\n\n if attr is None:\n raise AttributeError(f\"{self.__class__} does not have a {name} attribute\")\n\n return attr\n\n", + "url": "https://github.com/ansible/ansible.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 217, + "n_words": 60, + "vocab_size": 38, + "complexity": 6, + "nloc": 15, + "token_counts": 138, + "n_ast_nodes": 240, + "n_identifiers": 16, + "d_id": 78939, + "documentation": { + "docstring": " handle not found to populate dynamically a module function if module matching name exists ", + "n_words": 14, + "vocab_size": 13, + "n_whitespaces": 15, + "language": "en" + } + }, + { + "id": 247541, + "commit_id": "32c828d0f760492711a98b11376e229d795fd1b3", + "repo": "synapse", + "path": "tests/rest/media/v1/test_url_preview.py", + "file_name": "test_url_preview.py", + "fun_name": "test_blacklisted_ip_specific", + "commit_message": "Add type hints to `tests/rest`. 
(#12208)\n\nCo-authored-by: Patrick Cloke ", + "code": "def test_blacklisted_ip_specific(self) -> None:\n \n self.lookups[\"example.com\"] = [(IPv4Address, \"192.168.1.1\")]\n\n channel = self.make_request(\n \"GET\", \"preview_url?url=http://example.com\", shorthand=False\n )\n\n # No requests made.\n self.assertEqual(len(self.reactor.tcpClients), 0)\n self.assertEqual(channel.code, 502)\n self.assertEqual(\n channel.json_body,\n {\n \"errcode\": \"M_UNKNOWN\",\n \"error\": \"DNS resolution failure during URL preview generation\",\n },\n )\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 175, + "n_words": 38, + "vocab_size": 36, + "complexity": 1, + "nloc": 17, + "token_counts": 81, + "n_ast_nodes": 139, + "n_identifiers": 13, + "d_id": 71729, + "documentation": { + "docstring": "\n Blacklisted IP addresses, found via DNS, are not spidered.\n ", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 24, + "language": "en" + } + }, + { + "id": 206254, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/template/defaulttags.py", + "file_name": "defaulttags.py", + "fun_name": "verbatim", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def verbatim(parser, token):\n \n nodelist = parser.parse((\"endverbatim\",))\n parser.delete_first_token()\n return VerbatimNode(nodelist.render(Context()))\n\n\n@register.tag", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "@register.tag", + "n_ast_errors": 1, + "ast_levels": 11, + "n_whitespaces": 21, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 4, + "token_counts": 36, + "n_ast_nodes": 70, + "n_identifiers": 11, + "d_id": 51443, + "documentation": { + "docstring": "\n Stop the template engine from rendering the contents of this block tag.\n\n Usage::\n\n {% verbatim %}\n {% don't process this %}\n {% endverbatim %}\n\n You can also designate a specific closing tag block (allowing the\n unrendered use of ``{% endverbatim %}``)::\n\n {% verbatim myblock %}\n ...\n {% endverbatim myblock %}\n ", + "n_words": 50, + "vocab_size": 33, + "n_whitespaces": 116, + "language": "en" + } + }, + { + "id": 201942, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "tests/bulk_create/tests.py", + "file_name": "tests.py", + "fun_name": "test_large_batch_mixed_efficiency", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def test_large_batch_mixed_efficiency(self):\n \n with override_settings(DEBUG=True):\n connection.queries_log.clear()\n TwoFields.objects.bulk_create(\n [\n TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1)\n for i in range(100000, 101000)\n ]\n )\n self.assertLess(len(connection.queries), 10)\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 155, + "n_words": 29, + "vocab_size": 28, + "complexity": 3, + "nloc": 10, + "token_counts": 76, + "n_ast_nodes": 120, + "n_identifiers": 18, + "d_id": 50024, + "documentation": { + "docstring": "\n Test inserting a large batch with objects having primary key set\n mixed together with objects without PK set.\n ", + "n_words": 18, + "vocab_size": 16, + "n_whitespaces": 40, + "language": "en" + } + }, + { + "id": 47650, + "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", + "repo": "airflow", 
+ "path": "tests/operators/test_subdag_operator.py", + "file_name": "test_subdag_operator.py", + "fun_name": "test_subdag_pools", + "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", + "code": "def test_subdag_pools(self):\n \n dag = DAG('parent', default_args=default_args)\n subdag = DAG('parent.child', default_args=default_args)\n\n session = airflow.settings.Session()\n pool_1 = airflow.models.Pool(pool='test_pool_1', slots=1)\n pool_10 = airflow.models.Pool(pool='test_pool_10', slots=10)\n session.add(pool_1)\n session.add(pool_10)\n session.commit()\n\n EmptyOperator(task_id='dummy', dag=subdag, pool='test_pool_1')\n\n with pytest.raises(AirflowException):\n SubDagOperator(task_id='child', dag=dag, subdag=subdag, pool='test_pool_1')\n\n # recreate dag because failed subdagoperator was already added\n dag = DAG('parent', default_args=default_args)\n SubDagOperator(task_id='child', dag=dag, subdag=subdag, pool='test_pool_10')\n\n session.delete(pool_1)\n session.delete(pool_10)\n session.commit()\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 183, + "n_words": 53, + "vocab_size": 38, + "complexity": 1, + "nloc": 17, + "token_counts": 169, + "n_ast_nodes": 287, + "n_identifiers": 25, + "d_id": 9191, + "documentation": { + "docstring": "\n Subdags and subdag tasks can't both have a pool with 1 slot\n ", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 259435, + "commit_id": "75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc", + "repo": "scikit-learn", + "path": "sklearn/_loss/tests/test_loss.py", + "file_name": "test_loss.py", + "fun_name": "test_loss_of_perfect_prediction", + "commit_message": "ENH migrate GLMs / TweedieRegressor to linear loss (#22548)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Thomas J. 
Fan ", + "code": "def test_loss_of_perfect_prediction(loss, sample_weight):\n \n if not loss.is_multiclass:\n # Use small values such that exp(value) is not nan.\n raw_prediction = np.array([-10, -0.1, 0, 0.1, 3, 10])\n # If link is identity, we must respect the interval of y_pred:\n if isinstance(loss.link, IdentityLink):\n eps = 1e-10\n low = loss.interval_y_pred.low\n if not loss.interval_y_pred.low_inclusive:\n low = low + eps\n high = loss.interval_y_pred.high\n if not loss.interval_y_pred.high_inclusive:\n high = high - eps\n raw_prediction = np.clip(raw_prediction, low, high)\n y_true = loss.link.inverse(raw_prediction)\n else:\n # HalfMultinomialLoss\n y_true = np.arange(loss.n_classes).astype(float)\n # raw_prediction with entries -exp(10), but +exp(10) on the diagonal\n # this is close enough to np.inf which would produce nan\n raw_prediction = np.full(\n shape=(loss.n_classes, loss.n_classes),\n fill_value=-np.exp(10),\n dtype=float,\n )\n raw_prediction.flat[:: loss.n_classes + 1] = np.exp(10)\n\n if sample_weight == \"range\":\n sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])\n\n loss_value = loss.loss(\n y_true=y_true,\n raw_prediction=raw_prediction,\n sample_weight=sample_weight,\n )\n constant_term = loss.constant_to_optimal_zero(\n y_true=y_true, sample_weight=sample_weight\n )\n # Comparing loss_value + constant_term to zero would result in large\n # round-off errors.\n assert_allclose(loss_value, -constant_term, atol=1e-14, rtol=1e-15)\n\n\n@pytest.mark.parametrize(\"loss\", LOSS_INSTANCES, ids=loss_instance_name)\n@pytest.mark.parametrize(\"sample_weight\", [None, \"range\"])", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "@pytest.mark.parametrize(\"loss\", LOSS_INSTANCES, ids=loss_instance_name)\n@pytest.mark.parametrize(\"sample_weight\", [None, \"range\"])", + "n_ast_errors": 1, + "ast_levels": 15, + "n_whitespaces": 438, + "n_words": 159, + "vocab_size": 110, + "complexity": 6, + "nloc": 32, + "token_counts": 266, + "n_ast_nodes": 446, + "n_identifiers": 43, + "d_id": 75769, + "documentation": { + "docstring": "Test value of perfect predictions.\n\n Loss of y_pred = y_true plus constant_to_optimal_zero should sums up to\n zero.\n ", + "n_words": 17, + "vocab_size": 16, + "n_whitespaces": 26, + "language": "en" + } + }, + { + "id": 109381, + "commit_id": "a17f4f3bd63e3ca3754f96d7db4ce5197720589b", + "repo": "matplotlib", + "path": "lib/matplotlib/pyplot.py", + "file_name": "pyplot.py", + "fun_name": "set_cmap", + "commit_message": "MNT: convert tests and internal usage way from using mpl.cm.get_cmap", + "code": "def set_cmap(cmap):\n \n cmap = colormaps[cmap]\n\n rc('image', cmap=cmap.name)\n im = gci()\n\n if im is not None:\n im.set_cmap(cmap)\n\n\n@_copy_docstring_and_deprecators(matplotlib.image.imread)", + "url": "https://github.com/matplotlib/matplotlib.git", + "language": "Python", + "ast_errors": "@_copy_docstring_and_deprecators(matplotlib.image.imread)", + "n_ast_errors": 1, + "ast_levels": 9, + "n_whitespaces": 38, + "n_words": 17, + "vocab_size": 15, + "complexity": 2, + "nloc": 6, + "token_counts": 39, + "n_ast_nodes": 82, + "n_identifiers": 11, + "d_id": 23562, + "documentation": { + "docstring": "\n Set the default colormap, and applies it to the current image if any.\n\n Parameters\n ----------\n cmap : `~matplotlib.colors.Colormap` or str\n A colormap instance or the name of a registered colormap.\n\n See Also\n --------\n colormaps\n matplotlib.cm.register_cmap\n 
matplotlib.cm.get_cmap\n ", + "n_words": 36, + "vocab_size": 33, + "n_whitespaces": 74, + "language": "en" + } + }, + { + "id": 241581, + "commit_id": "650c710efacd633fa283955145342bb64063c883", + "repo": "lightning", + "path": "tests/strategies/test_sharded_strategy.py", + "file_name": "test_sharded_strategy.py", + "fun_name": "test_ddp_sharded_strategy_fit_ckpt_path", + "commit_message": "Rename training plugin test files & names to strategy (#11303)", + "code": "def test_ddp_sharded_strategy_fit_ckpt_path(tmpdir):\n \n model = BoringModel()\n trainer = Trainer(strategy=\"ddp_sharded_spawn\", num_processes=2, fast_dev_run=True)\n\n trainer.fit(model)\n\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n trainer.save_checkpoint(checkpoint_path)\n\n model = BoringModel()\n\n trainer = Trainer(strategy=\"ddp_sharded_spawn\", num_processes=2, fast_dev_run=True)\n\n trainer.fit(model, ckpt_path=checkpoint_path)\n\n\n@pytest.mark.skip(reason=\"Not a critical test, skip till drone CI performance improves.\") # todo\n@pytest.mark.skip(reason=\"Currently unsupported restarting training on different number of devices.\")\n@RunIf(min_gpus=2, skip_windows=True, fairscale=True)", + "url": "https://github.com/Lightning-AI/lightning.git", + "language": "Python", + "ast_errors": "@pytest.mark.skip(reason=\"Not a critical test, skip till drone CI performance improves.\") # todo\n@pytest.mark.skip(reason=\"Currently unsupported restarting training on different number of devices.\")\n@RunIf(min_gpus=2, skip_windows=True, fairscale=True)", + "n_ast_errors": 1, + "ast_levels": 10, + "n_whitespaces": 75, + "n_words": 50, + "vocab_size": 40, + "complexity": 1, + "nloc": 9, + "token_counts": 82, + "n_ast_nodes": 197, + "n_identifiers": 24, + "d_id": 69606, + "documentation": { + "docstring": "Test to ensure that resuming from checkpoint works.", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 63100, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py", + "file_name": "__init__.py", + "fun_name": "get_provider", + "commit_message": "upd; format", + "code": "def get_provider(moduleOrReq):\n \n if isinstance(moduleOrReq, Requirement):\n return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]\n try:\n module = sys.modules[moduleOrReq]\n except KeyError:\n __import__(moduleOrReq)\n module = sys.modules[moduleOrReq]\n loader = getattr(module, '__loader__', None)\n return _find_adapter(_provider_factories, loader)(module)\n\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 73, + "n_words": 27, + "vocab_size": 22, + "complexity": 4, + "nloc": 10, + "token_counts": 77, + "n_ast_nodes": 124, + "n_identifiers": 17, + "d_id": 13146, + "documentation": { + "docstring": "Return an IResourceProvider for the named module or requirement", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 44167, + "commit_id": "ff3bbc3db24f9f3f4f88033d48859fb08fc3237b", + "repo": "airflow", + "path": "airflow/models/base.py", + "file_name": "base.py", + "fun_name": "get_template_env", + "commit_message": "Implement enough interface for MappedOperator to be baggable (#20945)", + "code": "def get_template_env(self) -> jinja2.Environment:\n \n dag = self.get_dag()\n if dag:\n return 
dag.get_template_env()\n return SandboxedEnvironment(cache_size=0)\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 52, + "n_words": 13, + "vocab_size": 12, + "complexity": 2, + "nloc": 6, + "token_counts": 33, + "n_ast_nodes": 57, + "n_identifiers": 8, + "d_id": 8186, + "documentation": { + "docstring": "Fetch a Jinja template environment from the DAG or instantiate empty environment if no DAG.", + "n_words": 15, + "vocab_size": 14, + "n_whitespaces": 14, + "language": "en" + } + }, + { + "id": 204312, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/contrib/sessions/backends/signed_cookies.py", + "file_name": "signed_cookies.py", + "fun_name": "load", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def load(self):\n \n try:\n return signing.loads(\n self.session_key,\n serializer=self.serializer,\n # This doesn't handle non-default expiry dates, see #19201\n max_age=self.get_session_cookie_age(),\n salt=\"django.contrib.sessions.backends.signed_cookies\",\n )\n except Exception:\n # BadSignature, ValueError, or unpickling exceptions. If any of\n # these happen, reset the session.\n self.create()\n return {}\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 197, + "n_words": 39, + "vocab_size": 36, + "complexity": 2, + "nloc": 11, + "token_counts": 47, + "n_ast_nodes": 81, + "n_identifiers": 11, + "d_id": 50693, + "documentation": { + "docstring": "\n Load the data from the key itself instead of fetching from some\n external data store. Opposite of _get_session_key(), raise BadSignature\n if signature fails.\n ", + "n_words": 23, + "vocab_size": 19, + "n_whitespaces": 52, + "language": "en" + } + }, + { + "id": 217427, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/ftplib.py", + "file_name": "ftplib.py", + "fun_name": "mlsd", + "commit_message": "add python 3.10.4 for windows", + "code": "def mlsd(self, path=\"\", facts=[]):\n \n if facts:\n self.sendcmd(\"OPTS MLST \" + \";\".join(facts) + \";\")\n if path:\n cmd = \"MLSD %s\" % path\n else:\n cmd = \"MLSD\"\n lines = []\n self.retrlines(cmd, lines.append)\n for line in lines:\n facts_found, _, name = line.rstrip(CRLF).partition(' ')\n entry = {}\n for fact in facts_found[:-1].split(\";\"):\n key, _, value = fact.partition(\"=\")\n entry[key.lower()] = value\n yield (name, entry)\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 214, + "n_words": 58, + "vocab_size": 45, + "complexity": 5, + "nloc": 16, + "token_counts": 129, + "n_ast_nodes": 222, + "n_identifiers": 23, + "d_id": 54776, + "documentation": { + "docstring": "List a directory in a standardized format by using MLSD\n command (RFC-3659). If path is omitted the current directory\n is assumed. \"facts\" is a list of strings representing the type\n of information desired (e.g. 
[\"type\", \"size\", \"perm\"]).\n\n Return a generator object yielding a tuple of two elements\n for every file found in path.\n First element is the file name, the second one is a dictionary\n including a variable number of \"facts\" depending on the server\n and whether \"facts\" argument has been provided.\n ", + "n_words": 82, + "vocab_size": 60, + "n_whitespaces": 145, + "language": "en" + } + }, + { + "id": 147576, + "commit_id": "2eaa54bd763ae0e63158ae0d939633c804394b78", + "repo": "ray", + "path": "rllib/agents/trainer_config.py", + "file_name": "trainer_config.py", + "fun_name": "callbacks", + "commit_message": "[RLlib] POC: Config objects instead of dicts (PPO only). (#23491)", + "code": "def callbacks(self, callbacks_class) -> \"TrainerConfig\":\n \n self.callbacks_class = callbacks_class\n\n return self\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 7, + "n_whitespaces": 31, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 14, + "token_counts": 17, + "n_ast_nodes": 31, + "n_identifiers": 3, + "d_id": 34012, + "documentation": { + "docstring": "Sets the callbacks configuration.\n\n Args:\n callbacks_class: Callbacks class, whose methods will be run during\n various phases of training and environment sample collection.\n See the `DefaultCallbacks` class and\n `examples/custom_metrics_and_callbacks.py` for more usage information.\n\n Returns:\n This updated TrainerConfig object.\n ", + "n_words": 37, + "vocab_size": 35, + "n_whitespaces": 125, + "language": "en" + } + }, + { + "id": 284582, + "commit_id": "a6f7e111e68346aeab315985b3704c2710693b38", + "repo": "OpenBBTerminal", + "path": "openbb_terminal/cryptocurrency/due_diligence/dd_controller.py", + "file_name": "dd_controller.py", + "fun_name": "print_help", + "commit_message": "Bounty Hunter mood: 11 bugs fixed (#1853)\n\n* fix #1850\r\n\r\n* fix #1831\r\n\r\n* add extra check to Reddit API keys\r\n\r\n* ignore warning message to update praw api\r\n\r\n* improve OpenBB links\r\n\r\n* fix quick performance only on stocks class because I'm James bitch\r\n\r\n* fix quick performance only on stocks class because I'm James bitch\r\n\r\n* fix #1829\r\n\r\n* fix #1821\r\n\r\n* add messari to keys - fix #1819\r\n\r\n* example of multiple oclumns to check on options/chains\r\n\r\n* minor improvement in xlabel re. 
#1814\r\n\r\n* remove repeated command\r\n\r\n* fix #1698\r\n\r\n* fix line too long\r\n\r\n* fix #1814 fr now\r\n\r\n* fix tests", + "code": "def print_help(self):\n \n source_txt = CRYPTO_SOURCES.get(self.source, \"?\") if self.source != \"\" else \"\"\n help_text = f\n console.print(text=help_text, menu=\"Crypto - Due Diligence\")\n", + "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 48, + "n_words": 20, + "vocab_size": 18, + "complexity": 2, + "nloc": 56, + "token_counts": 42, + "n_ast_nodes": 86, + "n_identifiers": 12, + "d_id": 84832, + "documentation": { + "docstring": "Print help[cmds]\n load load a specific cryptocurrency for analysis\n\n[param]Coin: [/param]{self.coin}\n[param]Source: [/param]{source_txt}\n\n[src]CoinGecko[/src]\n info basic information about loaded coin\n market market stats about loaded coin\n ath all time high related stats for loaded coin\n atl all time low related stats for loaded coin\n web found websites for loaded coin e.g forum, homepage\n social social portals urls for loaded coin, e.g reddit, twitter\n score different kind of scores for loaded coin, e.g developer score, sentiment score\n dev github, bitbucket coin development statistics\n bc links to blockchain explorers for loaded coin\n[src]Glassnode[/src]\n active active addresses\n nonzero addresses with non-zero balances\n change 30d change of supply held on exchange wallets\n eb total balance held on exchanges (in percentage and units)\n[src]Coinglass[/src]\n oi open interest per exchange\n[src]CoinPaprika[/src]\n basic basic information about loaded coin\n ps price and supply related metrics for loaded coin\n mkt all markets for loaded coin\n ex all exchanges where loaded coin is listed\n twitter tweets for loaded coin\n events events related to loaded coin\n[src]Binance[/src]\n binbook order book\n balance coin balance\n[src]Coinbase[/src]\n cbbook order book\n trades last trades\n stats coin stats\n[src]Messari[/src]\n mcapdom market cap dominance\n mt messari timeseries e.g. twitter followers, circ supply, etc\n rm roadmap\n tk tokenomics e.g. circulating/max/total supply, emission type, etc\n pi project information e.g. technology details, public repos, audits, vulns\n team contributors (individuals and organizations)\n inv investors (individuals and organizations)\n gov governance details\n fr fundraising details e.g. treasury accounts, sales rounds, allocation\n links links e.g. 
whitepaper, github, twitter, youtube, reddit, telegram\n[src]Santiment[/src]\n gh github activity over time\n[src]CryptoPanic[/src]\n news loaded coin's most recent news[/cmds]\n", + "n_words": 260, + "vocab_size": 163, + "n_whitespaces": 814, + "language": "en" + } + }, + { + "id": 31263, + "commit_id": "a72f1c9f5b907f96cbb7de3bbb02a1d431d34071", + "repo": "transformers", + "path": "src/transformers/models/longt5/modeling_flax_longt5.py", + "file_name": "modeling_flax_longt5.py", + "fun_name": "__call__", + "commit_message": "Add `LongT5` model (#16792)\n\n* Initial commit\r\n\r\n* Make some fixes\r\n\r\n* Make PT model full forward pass\r\n\r\n* Drop TF & Flax implementation, fix copies etc\r\n\r\n* Add Flax model and update some corresponding stuff\r\n\r\n* Drop some TF things\r\n\r\n* Update config and flax local attn\r\n\r\n* Add encoder_attention_type to config\r\n\r\n* .\r\n\r\n* Update docs\r\n\r\n* Do some cleansing\r\n\r\n* Fix some issues -> make style; add some docs\r\n\r\n* Fix position_bias + mask addition + Update tests\r\n\r\n* Fix repo consistency\r\n\r\n* Fix model consistency by removing flax operation over attn_mask\r\n\r\n* [WIP] Add PT TGlobal LongT5\r\n\r\n* .\r\n\r\n* [WIP] Add flax tglobal model\r\n\r\n* [WIP] Update flax model to use the right attention type in the encoder\r\n\r\n* Fix flax tglobal model forward pass\r\n\r\n* Make the use of global_relative_attention_bias\r\n\r\n* Add test suites for TGlobal model\r\n\r\n* Fix minor bugs, clean code\r\n\r\n* Fix pt-flax equivalence though not convinced with correctness\r\n\r\n* Fix LocalAttn implementation to match the original impl. + update READMEs\r\n\r\n* Few updates\r\n\r\n* Update: [Flax] improve large model init and loading #16148\r\n\r\n* Add ckpt conversion script accoring to #16853 + handle torch device placement\r\n\r\n* Minor updates to conversion script.\r\n\r\n* Typo: AutoModelForSeq2SeqLM -> FlaxAutoModelForSeq2SeqLM\r\n\r\n* gpu support + dtype fix\r\n\r\n* Apply some suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Patrick von Platen \r\n\r\n* * Remove (de)parallelize stuff\r\n* Edit shape comments\r\n* Update README.md\r\n* make fix-copies\r\n\r\n* Remove caching logic for local & tglobal attention\r\n\r\n* Apply another batch of suggestions from code review\r\n\r\n* Add missing checkpoints\r\n* Format converting scripts\r\n* Drop (de)parallelize links from longT5 mdx\r\n\r\n* Fix converting script + revert config file change\r\n\r\n* Revert \"Remove caching logic for local & tglobal attention\"\r\n\r\nThis reverts commit 2a619828f6ddc3e65bd9bb1725a12b77fa883a46.\r\n\r\n* Stash caching logic in Flax model\r\n\r\n* Make side relative bias used always\r\n\r\n* Drop caching logic in PT model\r\n\r\n* Return side bias as it was\r\n\r\n* Drop all remaining model parallel logic\r\n\r\n* Remove clamp statements\r\n\r\n* Move test files to the proper place\r\n\r\n* Update docs with new version of hf-doc-builder\r\n\r\n* Fix test imports\r\n\r\n* Make some minor improvements\r\n\r\n* Add missing checkpoints to docs\r\n* Make TGlobal model compatible with torch.onnx.export\r\n* Replace some np.ndarray with jnp.ndarray\r\n\r\n* Fix TGlobal for ONNX conversion + update docs\r\n\r\n* fix _make_global_fixed_block_ids and masked neg value\r\n\r\n* update flax model\r\n\r\n* style and quality\r\n\r\n* fix imports\r\n\r\n* remove load_tf_weights_in_longt5 from init and fix copies\r\n\r\n* add slow test for TGlobal model\r\n\r\n* typo 
fix\r\n\r\n* Drop obsolete is_parallelizable and one warning\r\n\r\n* Update __init__ files to fix repo-consistency\r\n\r\n* fix pipeline test\r\n\r\n* Fix some device placements\r\n\r\n* [wip]: Update tests -- need to generate summaries to update expected_summary\r\n\r\n* Fix quality\r\n\r\n* Update LongT5 model card\r\n\r\n* Update (slow) summarization tests\r\n\r\n* make style\r\n\r\n* rename checkpoitns\r\n\r\n* finish\r\n\r\n* fix flax tests\r\n\r\nCo-authored-by: phungvanduy \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: patil-suraj ", + "code": "def __call__(self, hidden_states):\n \n # layer norm should always be calculated in float32\n variance = jnp.power(hidden_states.astype(\"f4\"), 2).mean(axis=-1, keepdims=True)\n hidden_states = hidden_states / jnp.sqrt(variance + self.eps)\n\n return self.weight * hidden_states\n\n\n# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5DenseActDense with T5->LongT5", + "url": "https://github.com/huggingface/transformers.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 68, + "n_words": 34, + "vocab_size": 30, + "complexity": 1, + "nloc": 4, + "token_counts": 55, + "n_ast_nodes": 91, + "n_identifiers": 13, + "d_id": 5713, + "documentation": { + "docstring": "\n Construct a layernorm module in the LongT5 style; No bias and no subtraction of mean.\n ", + "n_words": 15, + "vocab_size": 15, + "n_whitespaces": 30, + "language": "en" + } + }, + { + "id": 155618, + "commit_id": "2a9d34aff0a38be5fc8bfcdec38e5c4a7bafcf0e", + "repo": "dask", + "path": "dask/dataframe/core.py", + "file_name": "core.py", + "fun_name": "partitionwise_graph", + "commit_message": "Move DataFrame ACA aggregations to HLG (#8468)", + "code": "def partitionwise_graph(func, layer_name, *args, **kwargs):\n \n pairs = []\n numblocks = {}\n for arg in args:\n if isinstance(arg, _Frame):\n pairs.extend([arg._name, \"i\"])\n numblocks[arg._name] = (arg.npartitions,)\n elif isinstance(arg, Scalar):\n pairs.extend([arg._name, \"i\"])\n numblocks[arg._name] = (1,)\n elif isinstance(arg, Array):\n if arg.ndim == 1:\n pairs.extend([arg.name, \"i\"])\n elif arg.ndim == 0:\n pairs.extend([arg.name, \"\"])\n elif arg.ndim == 2:\n pairs.extend([arg.name, \"ij\"])\n else:\n raise ValueError(\"Can't add multi-dimensional array to dataframes\")\n numblocks[arg._name] = arg.numblocks\n elif isinstance(arg, BlockwiseDep):\n if len(arg.numblocks) == 1:\n pairs.extend([arg, \"i\"])\n elif len(arg.numblocks) == 2:\n pairs.extend([arg, \"ij\"])\n else:\n raise ValueError(\n f\"BlockwiseDep arg {arg!r} has {len(arg.numblocks)} dimensions; only 1 or 2 are supported.\"\n )\n else:\n pairs.extend([arg, None])\n return blockwise(\n func, layer_name, \"i\", *pairs, numblocks=numblocks, concatenate=True, **kwargs\n )\n\n", + "url": "https://github.com/dask/dask.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 20, + "n_whitespaces": 449, + "n_words": 107, + "vocab_size": 67, + "complexity": 11, + "nloc": 34, + "token_counts": 264, + "n_ast_nodes": 437, + "n_identifiers": 22, + "d_id": 36437, + "documentation": { + "docstring": "\n Apply a function partition-wise across arguments to create layer of a graph\n\n This applies a function, ``func``, in an embarrassingly parallel fashion\n across partitions/chunks in the provided arguments. 
It handles Dataframes,\n Arrays, and scalars smoothly, and relies on the ``blockwise`` machinery\n to provide a nicely symbolic graph.\n\n It is most commonly used in other graph-building functions to create the\n appropriate layer of the resulting dataframe.\n\n Parameters\n ----------\n func: callable\n layer_name: str\n Descriptive name for the operation. Used as the output name\n in the resulting ``Blockwise`` graph layer.\n *args:\n **kwargs:\n\n Returns\n -------\n out: Blockwise graph\n\n Examples\n --------\n >>> subgraph = partitionwise_graph(function, x, y, z=123) # doctest: +SKIP\n >>> layer = partitionwise_graph(function, df, x, z=123) # doctest: +SKIP\n >>> graph = HighLevelGraph.from_collections(name, layer, dependencies=[df, x]) # doctest: +SKIP\n >>> result = new_dd_object(graph, name, metadata, df.divisions) # doctest: +SKIP\n\n See Also\n --------\n map_partitions\n ", + "n_words": 140, + "vocab_size": 95, + "n_whitespaces": 238, + "language": "en" + } + }, + { + "id": 220825, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/asyncio/tasks.py", + "file_name": "tasks.py", + "fun_name": "cancel", + "commit_message": "add python 3.10.4 for windows", + "code": "def cancel(self, msg=None):\n \n self._log_traceback = False\n if self.done():\n return False\n if self._fut_waiter is not None:\n if self._fut_waiter.cancel(msg=msg):\n # Leave self._fut_waiter; it may be a Task that\n # catches and ignores the cancellation so we may have\n # to cancel it again later.\n return True\n # It must be the case that self.__step is already scheduled.\n self._must_cancel = True\n self._cancel_message = msg\n return True\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 201, + "n_words": 63, + "vocab_size": 45, + "complexity": 4, + "nloc": 10, + "token_counts": 58, + "n_ast_nodes": 98, + "n_identifiers": 8, + "d_id": 56134, + "documentation": { + "docstring": "Request that this task cancel itself.\n\n This arranges for a CancelledError to be thrown into the\n wrapped coroutine on the next cycle through the event loop.\n The coroutine then has a chance to clean up or even deny\n the request using try/except/finally.\n\n Unlike Future.cancel, this does not guarantee that the\n task will be cancelled: the exception might be caught and\n acted upon, delaying cancellation of the task or preventing\n cancellation completely. The task may also return a value or\n raise a different exception.\n\n Immediately after this method is called, Task.cancelled() will\n not return True (unless the task was already cancelled). A\n task will be marked as cancelled when the wrapped coroutine\n terminates with a CancelledError exception (even if cancel()\n was not called).\n ", + "n_words": 122, + "vocab_size": 83, + "n_whitespaces": 229, + "language": "en" + } + }, + { + "id": 60671, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_internal/configuration.py", + "file_name": "configuration.py", + "fun_name": "_dictionary", + "commit_message": "upd; format", + "code": "def _dictionary(self):\n # type: () -> Dict[str, Any]\n \n # NOTE: Dictionaries are not populated if not loaded. 
So, conditionals\n # are not needed here.\n retval = {}\n\n for variant in OVERRIDE_ORDER:\n retval.update(self._config[variant])\n\n return retval\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 100, + "n_words": 34, + "vocab_size": 28, + "complexity": 2, + "nloc": 5, + "token_counts": 28, + "n_ast_nodes": 50, + "n_identifiers": 7, + "d_id": 12237, + "documentation": { + "docstring": "A dictionary representing the loaded configuration.\n ", + "n_words": 6, + "vocab_size": 6, + "n_whitespaces": 13, + "language": "en" + } + }, + { + "id": 121058, + "commit_id": "4e224bcfb99c3bd9b6a32b8ad7836d12517e788f", + "repo": "jax", + "path": "jax/experimental/jax2tf/impl_no_xla.py", + "file_name": "impl_no_xla.py", + "fun_name": "_transpose_for_tf_conv", + "commit_message": "[jax2tf] Add support for common audio convolutions (1D variants, dilated depthwise, transpose with SAME padding).\n\nPiperOrigin-RevId: 458266485", + "code": "def _transpose_for_tf_conv(lhs, rhs, dimension_numbers):\n \n # TODO(marcvanzee): Add tests for this ops for shape polymorphism.\n lhs_perm, rhs_perm, _ = dimension_numbers\n\n # TODO(marcvanzee): Consider merging tranposes if we want to optimize.\n # For `lhs_perm` / `output_perm`, perm (0, 1, 2, 3) corresponds to \"NCHW\".\n lhs = tf.transpose(lhs, lhs_perm) # lhs --> \"NCHW\"\n if len(lhs_perm) == 3:\n # For 1D convolution, we add a trivial \"W\" dimension, so that 2D Convolution\n # logic can be applied downstream.\n lhs = lhs[:, :, :, np.newaxis]\n # However, the TF ops only support \"NHWC\" on CPU, so we transpose again.\n lhs = tf.transpose(lhs, (0, 2, 3, 1)) # \"NCHW\" --> \"NHWC\"\n\n # For `rhs_perm`, perm (0, 1, 2, 3) corresponds to \"OIHW\".\n rhs = tf.transpose(rhs, rhs_perm) # rhs --> \"OIHW\"\n # Handle conv1d case.\n if len(rhs_perm) == 3:\n rhs = rhs[:, :, :, np.newaxis]\n # For the tf ops, rhs is expected to be \"OIHW\".\n rhs = tf.transpose(rhs, (2, 3, 1, 0)) # \"OIHW\" --> \"HWIO\"\n return lhs, rhs\n\n", + "url": "https://github.com/google/jax.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 195, + "n_words": 163, + "vocab_size": 96, + "complexity": 3, + "nloc": 11, + "token_counts": 121, + "n_ast_nodes": 192, + "n_identifiers": 12, + "d_id": 27024, + "documentation": { + "docstring": "Tranposes lhs and rhs to respectively NHWC and HWIO so they can be passed to TF functions.", + "n_words": 17, + "vocab_size": 15, + "n_whitespaces": 16, + "language": "en" + } + }, + { + "id": 208740, + "commit_id": "d858213d4088237e1481038865bc52ccdd074053", + "repo": "ipython", + "path": "IPython/lib/tests/test_pretty.py", + "file_name": "test_pretty.py", + "fun_name": "test_pprint_heap_allocated_type", + "commit_message": "xxlimited_35 module now has the same name in repr in Py 3.11\n\nSee https://github.com/python/cpython/commit/a87c9b538fbfc42883417c4d5e69f1a5922690e3", + "code": "def test_pprint_heap_allocated_type():\n \n module_name = \"xxlimited\" if sys.version_info < (3, 10) else \"xxlimited_35\"\n expected_output = (\n \"xxlimited.Null\" if sys.version_info < (3, 11) else \"xxlimited_35.Null\"\n )\n xxlimited = pytest.importorskip(module_name)\n output = pretty.pretty(xxlimited.Null)\n assert output == expected_output\n\n", + "url": "https://github.com/ipython/ipython.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 62, 
+ "n_words": 34, + "vocab_size": 24, + "complexity": 3, + "nloc": 8, + "token_counts": 59, + "n_ast_nodes": 100, + "n_identifiers": 11, + "d_id": 52497, + "documentation": { + "docstring": "\n Test that pprint works for heap allocated types.\n ", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 15, + "language": "en" + } + }, + { + "id": 250281, + "commit_id": "652d1669c5a103b1c20478770c4aaf18849c09a3", + "repo": "synapse", + "path": "tests/handlers/test_e2e_room_keys.py", + "file_name": "test_e2e_room_keys.py", + "fun_name": "test_delete_missing_version", + "commit_message": "Add missing type hints to tests.handlers. (#14680)\n\nAnd do not allow untyped defs in tests.handlers.", + "code": "def test_delete_missing_version(self) -> None:\n \n e = self.get_failure(\n self.handler.delete_version(self.local_user, \"1\"), SynapseError\n )\n res = e.value.code\n self.assertEqual(res, 404)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 62, + "n_words": 16, + "vocab_size": 15, + "complexity": 1, + "nloc": 7, + "token_counts": 44, + "n_ast_nodes": 72, + "n_identifiers": 12, + "d_id": 73359, + "documentation": { + "docstring": "Check that we get a 404 on deleting nonexistent versions", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 20087, + "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", + "repo": "pipenv", + "path": "pipenv/patched/notpip/_vendor/distro.py", + "file_name": "distro.py", + "fun_name": "name", + "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", + "code": "def name(self, pretty=False):\n # type: (bool) -> str\n \n name = (\n self.os_release_attr(\"name\")\n or self.lsb_release_attr(\"distributor_id\")\n or self.distro_release_attr(\"name\")\n or self.uname_attr(\"name\")\n )\n if pretty:\n name = self.os_release_attr(\"pretty_name\") or self.lsb_release_attr(\n \"description\"\n )\n if not name:\n name = self.distro_release_attr(\"name\") or self.uname_attr(\"name\")\n version = self.version(pretty=True)\n if version:\n name = name + \" \" + version\n return name or \"\"\n", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 251, + "n_words": 53, + "vocab_size": 31, + "complexity": 10, + "nloc": 17, + "token_counts": 102, + "n_ast_nodes": 186, + "n_identifiers": 8, + "d_id": 3230, + "documentation": { + "docstring": "\n Return the name of the OS distribution, as a string.\n\n For details, see :func:`distro.name`.\n ", + "n_words": 14, + "vocab_size": 13, + "n_whitespaces": 36, + "language": "en" + } + }, + { + "id": 44997, + "commit_id": "0ebd6428e6b484790bfbbe1b8687ef4e6cae10e9", + "repo": "airflow", + "path": "tests/serialization/test_dag_serialization.py", + "file_name": "test_dag_serialization.py", + "fun_name": 
"test_extra_serialized_field_and_multiple_operator_links", + "commit_message": "Switch XCom implementation to use run_id (#20975)", + "code": "def test_extra_serialized_field_and_multiple_operator_links(self, dag_maker):\n \n test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)\n with dag_maker(dag_id='simple_dag', start_date=test_date) as dag:\n CustomOperator(task_id='simple_task', bash_command=[\"echo\", \"true\"])\n\n serialized_dag = SerializedDAG.to_dict(dag)\n assert \"bash_command\" in serialized_dag[\"dag\"][\"tasks\"][0]\n\n dag = SerializedDAG.from_dict(serialized_dag)\n simple_task = dag.task_dict[\"simple_task\"]\n assert getattr(simple_task, \"bash_command\") == [\"echo\", \"true\"]\n\n #########################################################\n # Verify Operator Links work with Serialized Operator\n #########################################################\n # Check Serialized version of operator link only contains the inbuilt Op Link\n assert serialized_dag[\"dag\"][\"tasks\"][0][\"_operator_extra_links\"] == [\n {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},\n {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},\n ]\n\n # Test all the extra_links are set\n assert set(simple_task.extra_links) == {\n 'BigQuery Console #1',\n 'BigQuery Console #2',\n 'airflow',\n 'github',\n 'google',\n }\n\n dag_maker.create_dagrun(execution_date=test_date)\n XCom.set(\n key='search_query',\n value=[\"dummy_value_1\", \"dummy_value_2\"],\n task_id=simple_task.task_id,\n dag_id=simple_task.dag_id,\n execution_date=test_date,\n )\n\n # Test Deserialized inbuilt link #1\n custom_inbuilt_link = simple_task.get_extra_links(test_date, \"BigQuery Console #1\")\n assert 'https://console.cloud.google.com/bigquery?j=dummy_value_1' == custom_inbuilt_link\n\n # Test Deserialized inbuilt link #2\n custom_inbuilt_link = simple_task.get_extra_links(test_date, \"BigQuery Console #2\")\n assert 'https://console.cloud.google.com/bigquery?j=dummy_value_2' == custom_inbuilt_link\n\n # Test Deserialized link registered via Airflow Plugin\n google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)\n assert \"https://www.google.com\" == google_link_from_plugin\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 494, + "n_words": 148, + "vocab_size": 98, + "complexity": 1, + "nloc": 34, + "token_counts": 246, + "n_ast_nodes": 430, + "n_identifiers": 33, + "d_id": 8435, + "documentation": { + "docstring": "\n Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.\n\n This tests also depends on GoogleLink() registered as a plugin\n in tests/plugins/test_plugin.py\n\n The function tests that if extra operator links are registered in plugin\n in ``operator_extra_links`` and the same is also defined in\n the Operator in ``BaseOperator.operator_extra_links``, it has the correct\n extra link.\n ", + "n_words": 56, + "vocab_size": 40, + "n_whitespaces": 113, + "language": "en" + } + }, + { + "id": 313595, + "commit_id": "a0974e0c7297537149985f93544dd6f8ed8cfded", + "repo": "core", + "path": "homeassistant/components/lifx/light.py", + "file_name": "light.py", + "fun_name": "get_mac_addr", + "commit_message": "Refactor LIFX discovery to prevent duplicate discovery response handling (#72213)\n\n* Partially revert #70458 and allow duplicate LIFX discoveries\r\n\r\nSigned-off-by: Avi Miller \r\n\r\n* Only 
process one discovery at a time\r\n\r\n* Revert all LIFX duplicate/inflight discovery checks\r\n\r\nAlso remember LIFX Switches and do as little processing for them\r\nas possible.\r\n\r\nSigned-off-by: Avi Miller \r\n\r\n* Bump aiolifx version to support the latest LIFX devices\r\n\r\nLIFX added 22 new product definitions to their public product\r\nlist at the end of January and those new products are defined in\r\naiolifx v0.8.1, so bump the dependency version.\r\n\r\nAlso switched to testing for relays instead of maintaining a\r\nseperate list of switch product IDs.\r\n\r\nFixes #72894.\r\n\r\nSigned-off-by: Avi Miller \r\n\r\n* Refactor LIFX discovery to better handle duplicate responses\r\n\r\nSigned-off-by: Avi Miller \r\n\r\n* Update clear_inflight_discovery with review suggestion\r\n\r\nSigned-off-by: Avi Miller \r\n\r\n* Move the existing entity check to before the asyncio lock\r\n\r\nSigned-off-by: Avi Miller \r\n\r\n* Bail out of discovery early and if an entity was created\r\n\r\nAlso ensure that the entity always has a unique ID even if the bulb was\r\nnot successfully discovered.\r\n\r\nSigned-off-by: Avi Miller \r\n\r\nCo-authored-by: J. Nick Koston ", + "code": "def get_mac_addr(self):\n \n if (\n self.bulb.host_firmware_version\n and AwesomeVersion(self.bulb.host_firmware_version) >= FIX_MAC_FW\n ):\n octets = [int(octet, 16) for octet in self.mac_addr.split(\":\")]\n octets[5] = (octets[5] + 1) % 256\n return \":\".join(f\"{octet:02x}\" for octet in octets)\n return self.mac_addr\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 116, + "n_words": 33, + "vocab_size": 28, + "complexity": 5, + "nloc": 9, + "token_counts": 78, + "n_ast_nodes": 131, + "n_identifiers": 12, + "d_id": 112213, + "documentation": { + "docstring": "Increment the last byte of the mac address by one for FW>3.70.", + "n_words": 12, + "vocab_size": 11, + "n_whitespaces": 11, + "language": "en" + } + }, + { + "id": 271550, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/engine/training.py", + "file_name": "training.py", + "fun_name": "metrics_names", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def metrics_names(self):\n \n\n # This property includes all output names including `loss` and per-output\n # losses for backward compatibility.\n return [m.name for m in self.metrics]\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 52, + "n_words": 24, + "vocab_size": 22, + "complexity": 2, + "nloc": 2, + "token_counts": 18, + "n_ast_nodes": 32, + "n_identifiers": 5, + "d_id": 80791, + "documentation": { + "docstring": "Returns the model's display labels for all outputs.\n\n Note: `metrics_names` are available only after a `keras.Model` has been\n trained/evaluated on actual data.\n\n Examples:\n\n >>> inputs = tf.keras.layers.Input(shape=(3,))\n >>> outputs = tf.keras.layers.Dense(2)(inputs)\n >>> model = tf.keras.models.Model(inputs=inputs, outputs=outputs)\n >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\"])\n >>> model.metrics_names\n []\n\n >>> x = np.random.random((2, 3))\n >>> y = np.random.randint(0, 2, (2, 2))\n >>> model.fit(x, y)\n >>> model.metrics_names\n ['loss', 'mae']\n\n >>> inputs = tf.keras.layers.Input(shape=(3,))\n >>> d = tf.keras.layers.Dense(2, 
name='out')\n >>> output_1 = d(inputs)\n >>> output_2 = d(inputs)\n >>> model = tf.keras.models.Model(\n ... inputs=inputs, outputs=[output_1, output_2])\n >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\", \"acc\"])\n >>> model.fit(x, (y, y))\n >>> model.metrics_names\n ['loss', 'out_loss', 'out_1_loss', 'out_mae', 'out_acc', 'out_1_mae',\n 'out_1_acc']\n\n ", + "n_words": 105, + "vocab_size": 70, + "n_whitespaces": 290, + "language": "en" + } + }, + { + "id": 155542, + "commit_id": "dfdc4bbab43678927e30866f06df509483ac5d24", + "repo": "dask", + "path": "dask/highlevelgraph.py", + "file_name": "highlevelgraph.py", + "fun_name": "from_collections", + "commit_message": "Collections with HLG must always implement __dask_layers__ (#8548)\n\nWithout ``__dask_layers__``, ``HighLevelGraph.from_collections`` will produce a broken layer dependency graph and things will fall apart down the line. Given this, it seems likely that no users should have the use case in production, so it should be safe not to have a deprecation cycle here.", + "code": "def from_collections(cls, name, layer, dependencies=()):\n \n if len(dependencies) == 1:\n return cls._from_collection(name, layer, dependencies[0])\n layers = {name: layer}\n deps = {name: set()}\n for collection in toolz.unique(dependencies, key=id):\n if is_dask_collection(collection):\n graph = collection.__dask_graph__()\n if isinstance(graph, HighLevelGraph):\n layers.update(graph.layers)\n deps.update(graph.dependencies)\n deps[name] |= set(collection.__dask_layers__())\n else:\n key = _get_some_layer_name(collection)\n layers[key] = graph\n deps[name].add(key)\n deps[key] = set()\n else:\n raise TypeError(type(collection))\n\n return cls(layers, deps)\n", + "url": "https://github.com/dask/dask.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 324, + "n_words": 56, + "vocab_size": 44, + "complexity": 5, + "nloc": 53, + "token_counts": 165, + "n_ast_nodes": 260, + "n_identifiers": 26, + "d_id": 36421, + "documentation": { + "docstring": "Construct a HighLevelGraph from a new layer and a set of collections\n\n This constructs a HighLevelGraph in the common case where we have a single\n new layer and a set of old collections on which we want to depend.\n\n This pulls out the ``__dask_layers__()`` method of the collections if\n they exist, and adds them to the dependencies for this new layer. It\n also merges all of the layers from all of the dependent collections\n together into the new layers for this graph.\n\n Parameters\n ----------\n name : str\n The name of the new layer\n layer : Mapping\n The graph layer itself\n dependencies : List of Dask collections\n A list of other dask collections (like arrays or dataframes) that\n have graphs themselves\n\n Examples\n --------\n\n In typical usage we make a new task layer, and then pass that layer\n along with all dependent collections to this method.\n\n >>> def add(self, other):\n ... name = 'add-' + tokenize(self, other)\n ... layer = {(name, i): (add, input_key, other)\n ... for i, input_key in enumerate(self.__dask_keys__())}\n ... graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])\n ... 
return new_collection(name, graph)\n ", + "n_words": 179, + "vocab_size": 105, + "n_whitespaces": 407, + "language": "en" + } + }, + { + "id": 116887, + "commit_id": "cc6313a0f791ba42782082b1161b6a62578e45f4", + "repo": "mindsdb", + "path": "tests/unit/test_ml_handlers.py", + "file_name": "test_ml_handlers.py", + "fun_name": "test_hf_classification_bin", + "commit_message": "fixed one line prediction for new ml handler api", + "code": "def test_hf_classification_bin(self, mock_handler):\n\n\n # create predictor\n create_sql = \n\n model_name = 'spam_classifier'\n\n predict_sql = \n self.hf_test_run(mock_handler, model_name, create_sql, predict_sql)\n\n # one line prediction\n predict_sql = \n # use predictor\n ret = self.command_executor.execute_command(parse_sql(predict_sql, dialect='mindsdb'))\n assert ret.error_code is None\n", + "url": "https://github.com/mindsdb/mindsdb.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 106, + "n_words": 34, + "vocab_size": 26, + "complexity": 1, + "nloc": 23, + "token_counts": 54, + "n_ast_nodes": 98, + "n_identifiers": 13, + "d_id": 25865, + "documentation": { + "docstring": "\n CREATE PREDICTOR huggingface.spam_classifier\n predict PRED\n USING\n task='text-classification',\n model_name= \"mrm8488/bert-tiny-finetuned-sms-spam-detection\",\n input_column = 'text_spammy',\n labels=['ham','spam']\n \n SELECT h.*\n FROM pg.df as t \n JOIN huggingface.spam_classifier as h\n \n SELECT * from huggingface.spam_classifier\n where text_spammy= 'It is the best time to launch the Robot to get more money. https:\\\\/\\\\/Gof.bode-roesch.de\\\\/Gof'\n ", + "n_words": 43, + "vocab_size": 37, + "n_whitespaces": 216, + "language": "en" + } + }, + { + "id": 265476, + "commit_id": "e96620260a6c1b5cf8cff2112d40d061984a7b2c", + "repo": "netbox", + "path": "netbox/netbox/denormalized.py", + "file_name": "denormalized.py", + "fun_name": "update_denormalized_fields", + "commit_message": "Closes #9903: Implement a mechanism for automatically updating denormalized fields", + "code": "def update_denormalized_fields(sender, instance, created, raw, **kwargs):\n \n # Skip for new objects or those being populated from raw data\n if created or raw:\n return\n\n # Look up any denormalized fields referencing this model from the application registry\n for model, field_name, mappings in registry['denormalized_fields'].get(sender, []):\n logger.debug(f'Updating denormalized values for {model}.{field_name}')\n filter_params = {\n field_name: instance.pk,\n }\n update_params = {\n # Map the denormalized field names to the instance's values\n denorm: getattr(instance, origin) for denorm, origin in mappings.items()\n }\n\n # TODO: Improve efficiency here by placing conditions on the query?\n # Update all the denormalized fields with the triggering object's new values\n count = model.objects.filter(**filter_params).update(**update_params)\n logger.debug(f'Updated {count} rows')\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 223, + "n_words": 105, + "vocab_size": 79, + "complexity": 5, + "nloc": 13, + "token_counts": 104, + "n_ast_nodes": 180, + "n_identifiers": 24, + "d_id": 78111, + "documentation": { + "docstring": "\n Check if the sender has denormalized fields registered, and update them as necessary.\n ", + "n_words": 13, + "vocab_size": 13, + "n_whitespaces": 20, + "language": "en" + } + }, + { + "id": 
208820, + "commit_id": "8a66e854a87b5147d811bd3bc92c5c2a382633e1", + "repo": "ipython", + "path": "IPython/extensions/autoreload.py", + "file_name": "autoreload.py", + "fun_name": "aimport", + "commit_message": "Improve parsing for `%aimport`", + "code": "def aimport(self, parameter_s=\"\", stream=None):\n \n modname = parameter_s\n if not modname:\n to_reload = sorted(self._reloader.modules.keys())\n to_skip = sorted(self._reloader.skip_modules.keys())\n if stream is None:\n stream = sys.stdout\n if self._reloader.check_all:\n stream.write(\"Modules to reload:\\nall-except-skipped\\n\")\n else:\n stream.write(\"Modules to reload:\\n%s\\n\" % \" \".join(to_reload))\n stream.write(\"\\nModules to skip:\\n%s\\n\" % \" \".join(to_skip))\n else:\n for _module in [_.strip() for _ in modname.split(\",\")]:\n if _module.startswith(\"-\"):\n _module = _module[1:].strip()\n self._reloader.mark_module_skipped(_module)\n else:\n top_module, top_name = self._reloader.aimport_module(_module)\n\n # Inject module to user namespace\n self.shell.push({top_name: top_module})\n", + "url": "https://github.com/ipython/ipython.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 344, + "n_words": 69, + "vocab_size": 49, + "complexity": 7, + "nloc": 20, + "token_counts": 175, + "n_ast_nodes": 308, + "n_identifiers": 28, + "d_id": 52549, + "documentation": { + "docstring": "%aimport => Import modules for automatic reloading.\n\n %aimport\n List modules to automatically import and not to import.\n\n %aimport foo\n Import module 'foo' and mark it to be autoreloaded for %autoreload 1\n\n %aimport foo, bar\n Import modules 'foo', 'bar' and mark them to be autoreloaded for %autoreload 1\n\n %aimport -foo, bar\n Mark module 'foo' to not be autoreloaded for %autoreload 1, 2, or 3, and 'bar'\n to be autoreloaded for 1.\n ", + "n_words": 70, + "vocab_size": 35, + "n_whitespaces": 140, + "language": "en" + } + }, + { + "id": 223881, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/email/utils.py", + "file_name": "utils.py", + "fun_name": "format_datetime", + "commit_message": "add python 3.10.4 for windows", + "code": "def format_datetime(dt, usegmt=False):\n \n now = dt.timetuple()\n if usegmt:\n if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc:\n raise ValueError(\"usegmt option requires a UTC datetime\")\n zone = 'GMT'\n elif dt.tzinfo is None:\n zone = '-0000'\n else:\n zone = dt.strftime(\"%z\")\n return _format_timetuple_and_zone(now, zone)\n\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 97, + "n_words": 40, + "vocab_size": 31, + "complexity": 5, + "nloc": 11, + "token_counts": 72, + "n_ast_nodes": 125, + "n_identifiers": 13, + "d_id": 57131, + "documentation": { + "docstring": "Turn a datetime into a date string as specified in RFC 2822.\n\n If usegmt is True, dt must be an aware datetime with an offset of zero. In\n this case 'GMT' will be rendered instead of the normal +0000 required by\n RFC2822. 
This is to support HTTP headers involving date stamps.\n ", + "n_words": 51, + "vocab_size": 44, + "n_whitespaces": 65, + "language": "en" + } + }, + { + "id": 268035, + "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", + "repo": "ansible", + "path": "test/lib/ansible_test/_internal/provisioning.py", + "file_name": "provisioning.py", + "fun_name": "get_controller_target_connections", + "commit_message": "ansible-test - Use more native type hints. (#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", + "code": "def get_controller_target_connections(self) -> t.List[SshConnection]:\n \n return list(itertools.chain.from_iterable([target.get_controller_target_connections() for\n target in self.target_profiles if isinstance(target, SshTargetHostProfile)]))\n", + "url": "https://github.com/ansible/ansible.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 77, + "n_words": 13, + "vocab_size": 13, + "complexity": 3, + "nloc": 4, + "token_counts": 44, + "n_ast_nodes": 70, + "n_identifiers": 13, + "d_id": 79309, + "documentation": { + "docstring": "Return SSH connection(s) for accessing all target hosts from the controller.", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 22423, + "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", + "repo": "Python", + "path": "Eight_Puzzle_Solver/eight_puzzle.py", + "file_name": "eight_puzzle.py", + "fun_name": "getAvailableActions", + "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", + "code": "def getAvailableActions(self):\n \n action = list()\n for i in range(self.size):\n for j in range(self.size):\n if self.state[i][j] == 0:\n if i > 0:\n action.append(2)\n if j > 0:\n action.append(0)\n if i < self.size - 1:\n action.append(3)\n if j < self.size - 1:\n action.append(1)\n return action\n return action\n", + "url": "https://github.com/geekcomputers/Python.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 286, + "n_words": 45, + "vocab_size": 24, + "complexity": 8, + "nloc": 15, + "token_counts": 100, + "n_ast_nodes": 161, + "n_identifiers": 10, + "d_id": 4328, + "documentation": { + "docstring": "\n Parameters: Current State\n Returns: Available Actions for Current State\n 0 - Left 1 - Right 2 - Top 3 - Bottom\n Restrictions: state is self.size x self.size Array\n ", + "n_words": 28, + "vocab_size": 22, + "n_whitespaces": 73, + "language": "en" + } + }, + { + "id": 56867, + "commit_id": "83002be7b3d6ec51edcb8252484e52d918c514c1", + "repo": "prefect", + "path": "src/prefect/packaging/serializers.py", + "file_name": "serializers.py", + "fun_name": "check_picklelib", + "commit_message": "Begin sketch of deployment/packager/manifest relationship", + "code": "def check_picklelib(cls, value):\n \n try:\n pickler = from_qualified_name(value)\n except (ImportError, AttributeError) as exc:\n 
raise ValueError(\n f\"Failed to import requested pickle library: {value!r}.\"\n ) from exc\n\n if not hasattr(pickler, \"dumps\"):\n raise ValueError(\n f\"Pickle library at {value!r} does not have a 'dumps' method.\"\n )\n\n if not hasattr(pickler, \"loads\"):\n raise ValueError(\n f\"Pickle library at {value!r} does not have a 'loads' method.\"\n )\n\n return value\n", + "url": "https://github.com/PrefectHQ/prefect.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 224, + "n_words": 60, + "vocab_size": 41, + "complexity": 4, + "nloc": 16, + "token_counts": 65, + "n_ast_nodes": 125, + "n_identifiers": 10, + "d_id": 11574, + "documentation": { + "docstring": "\n Check that the given pickle library is importable and has dumps/loads methods.\n ", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 205782, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/db/models/query.py", + "file_name": "query.py", + "fun_name": "dates", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def dates(self, field_name, kind, order=\"ASC\"):\n \n if kind not in (\"year\", \"month\", \"week\", \"day\"):\n raise ValueError(\"'kind' must be one of 'year', 'month', 'week', or 'day'.\")\n if order not in (\"ASC\", \"DESC\"):\n raise ValueError(\"'order' must be either 'ASC' or 'DESC'.\")\n return (\n self.annotate(\n datefield=Trunc(field_name, kind, output_field=DateField()),\n plain_field=F(field_name),\n )\n .values_list(\"datefield\", flat=True)\n .distinct()\n .filter(plain_field__isnull=False)\n .order_by((\"-\" if order == \"DESC\" else \"\") + \"datefield\")\n )\n\n # RemovedInDjango50Warning: when the deprecation ends, remove is_dst\n # argument.", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 22, + "n_whitespaces": 229, + "n_words": 70, + "vocab_size": 58, + "complexity": 4, + "nloc": 15, + "token_counts": 113, + "n_ast_nodes": 197, + "n_identifiers": 19, + "d_id": 51213, + "documentation": { + "docstring": "\n Return a list of date objects representing all available dates for\n the given field_name, scoped to 'kind'.\n ", + "n_words": 17, + "vocab_size": 17, + "n_whitespaces": 39, + "language": "en" + } + }, + { + "id": 178405, + "commit_id": "a24d709ad2350c081a9a41cd76db72288b8ab014", + "repo": "Nuitka", + "path": "nuitka/Options.py", + "file_name": "Options.py", + "fun_name": "commentArgs", + "commit_message": "UI: Added warning when following stdlib in accelerated mode\n\n* That's not going to work at this time, so we should inform the\n user to not use it.", + "code": "def commentArgs():\n \n # A ton of cases to consider, pylint: disable=too-many-branches,too-many-statements\n\n # Inform the user about potential issues with the running version. e.g. unsupported\n # version.\n if python_version_str not in getSupportedPythonVersions():\n # Do not disturb run of automatic tests with, detected from the presence of\n # that environment variable.\n if \"PYTHON\" not in os.environ:\n Tracing.general.warning(\n \"The version %r is not currently supported. 
Expect problems.\"\n % python_version_str,\n )\n\n default_reference_mode = (\n \"runtime\" if shallMakeModule() or isStandaloneMode() else \"original\"\n )\n\n if getFileReferenceMode() is None:\n options.file_reference_mode = default_reference_mode\n else:\n if options.file_reference_mode != default_reference_mode:\n Tracing.options_logger.warning(\n \"Using non-default file reference mode '%s' rather than '%s' may cause runtime issues.\"\n % (getFileReferenceMode(), default_reference_mode)\n )\n else:\n Tracing.options_logger.info(\n \"Using default file reference mode '%s' need not be specified.\"\n % default_reference_mode\n )\n\n # TODO: Not all of these are usable with MSYS2 really, split those off.\n if getOS() != \"Windows\":\n # Too many Windows specific options clearly, pylint: disable=too-many-boolean-expressions\n if (\n getWindowsIconExecutablePath()\n or shallAskForWindowsAdminRights()\n or shallAskForWindowsUIAccessRights()\n or getWindowsCompanyName()\n or getWindowsProductName()\n or getWindowsProductVersion()\n or getWindowsFileVersion()\n or getForcedStderrPath() # not yet for other platforms\n or getForcedStdoutPath()\n or getWindowsSplashScreen()\n or getIntendedPythonArch()\n ):\n Tracing.options_logger.warning(\n \"Using Windows specific options has no effect on other platforms.\"\n )\n\n if options.mingw64 or options.msvc_version:\n Tracing.options_logger.warning(\n \"Requesting Windows specific compilers has no effect on other platforms.\"\n )\n\n if isMingw64() and getMsvcVersion():\n Tracing.options_logger.sysexit(\n \"Requesting both Windows specific compilers makes no sense.\"\n )\n\n if isOnefileMode():\n standalone_mode = \"onefile\"\n elif isStandaloneMode():\n standalone_mode = \"standalone\"\n else:\n standalone_mode = None\n\n if standalone_mode and not hasStandaloneSupportedOS():\n Tracing.options_logger.warning(\n \"Standalone mode on %s is not known to be supported, might fail to work.\"\n % getOS()\n )\n\n if options.follow_all and standalone_mode:\n Tracing.options_logger.info(\n \"Following all imports is the default for %s mode and need not be specified.\"\n % standalone_mode\n )\n\n if options.follow_none and standalone_mode:\n Tracing.options_logger.warning(\n \"Following no imports is unlikely to work for %s mode and should not be specified.\"\n % standalone_mode\n )\n\n if options.follow_stdlib and not standalone_mode:\n Tracing.options_logger.warning(\n \"Following imports to stdlib is unlikely to work without --standalone/--onefile and should not be specified.\"\n )\n\n if (\n not shallDumpBuiltTreeXML()\n and not standalone_mode\n and not options.follow_all\n and not options.follow_none\n and not options.follow_modules\n and not options.follow_stdlib\n and not options.include_modules\n and not options.include_packages\n and not options.include_extra\n and not options.follow_not_modules\n ):\n Tracing.options_logger.warning(\n \n % (\"module\" if shallMakeModule() else \"program\")\n )\n\n if options.dependency_tool:\n Tracing.options_logger.warning(\n \"Using removed option '--windows-dependency-tool' is deprecated and has no impact anymore.\"\n )\n\n if shallMakeModule() and options.static_libpython == \"yes\":\n Tracing.options_logger.warning(\n \"In module mode, providing '--static-libpython' has no effect, it's not used.\"\n )\n\n options.static_libpython = \"no\"\n\n if (\n not isPgoMode()\n and not isPythonPgoMode()\n and (getPgoArgs() or getPgoExecutable())\n ):\n Tracing.optimization_logger.warning(\n \"Providing PGO arguments 
without enabling PGO mode has no effect.\"\n )\n\n if isPgoMode():\n if isStandaloneMode():\n Tracing.optimization_logger.warning(\n \"Using PGO with standalone/onefile mode is not currently working. Expect errors.\"\n )\n\n if shallMakeModule():\n Tracing.optimization_logger.warning(\n \"Using PGO with module mode is not currently working. Expect errors.\"\n )\n\n if (\n options.static_libpython == \"auto\"\n and not shallMakeModule()\n and not shallDumpBuiltTreeXML()\n and not shallUseStaticLibPython()\n and getSystemStaticLibPythonPath() is not None\n ):\n Tracing.options_logger.info(\n \n )\n\n if not shallExecuteImmediately():\n if shallRunInDebugger():\n Tracing.options_logger.warning(\n \"The '--debugger' option has no effect outside of '--debug' without '--run' option.\"\n )\n\n if not shallClearPythonPathEnvironment():\n Tracing.options_logger.warning(\n \"The '--execute-with-pythonpath' option has no effect without '--run' option.\"\n )\n\n", + "url": "https://github.com/Nuitka/Nuitka.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 20, + "n_whitespaces": 1656, + "n_words": 514, + "vocab_size": 221, + "complexity": 62, + "nloc": 136, + "token_counts": 523, + "n_ast_nodes": 941, + "n_identifiers": 57, + "d_id": 42687, + "documentation": { + "docstring": "Comment on options, where we know something is not having the intended effect.\n\n :meta private:\n\n You did not specify to follow or include anything but main %s. Check options and \\\nmake sure that is intended.Detected static libpython to exist, consider '--static-libpython=yes' for better performance, \\\nbut errors may happen.", + "n_words": 50, + "vocab_size": 45, + "n_whitespaces": 53, + "language": "en" + } + }, + { + "id": 161257, + "commit_id": "0caed984e39c07849a13662894f4cbdbe0a98091", + "repo": "MockingBird", + "path": "vocoder/fregan/modules.py", + "file_name": "modules.py", + "fun_name": "forward", + "commit_message": "The new vocoder Fre-GAN is now supported (#546)\n\n* The new vocoder Fre-GAN is now supported\r\n\r\n* Improved some fregan details", + "code": "def forward(self, c):\n \n batch, cond_channels, cond_length = c.shape\n\n c = self.input_conv(c)\n c = c + self.residual_conv(c)\n k = self.kernel_conv(c)\n b = self.bias_conv(c)\n\n kernels = k.contiguous().view(batch,\n self.conv_layers,\n self.conv_in_channels,\n self.conv_out_channels,\n self.conv_kernel_size,\n cond_length)\n bias = b.contiguous().view(batch,\n self.conv_layers,\n self.conv_out_channels,\n cond_length)\n return kernels, bias\n\n", + "url": "https://github.com/babysor/MockingBird.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 389, + "n_words": 39, + "vocab_size": 27, + "complexity": 1, + "nloc": 17, + "token_counts": 107, + "n_ast_nodes": 163, + "n_identifiers": 21, + "d_id": 38950, + "documentation": { + "docstring": "\n Args:\n c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)\n Returns:\n ", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 43, + "language": "en" + } + }, + { + "id": 242744, + "commit_id": "ee85e387bab535e2339b9d3cd1ab87c61d23af15", + "repo": "Pillow", + "path": "src/PIL/Jpeg2KImagePlugin.py", + "file_name": "Jpeg2KImagePlugin.py", + "fun_name": "_parse_codestream", + "commit_message": "Remove redundant parentheses", + "code": "def _parse_codestream(fp):\n \n\n hdr = fp.read(2)\n lsiz = struct.unpack(\">H\", hdr)[0]\n siz = hdr + fp.read(lsiz - 2)\n lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, 
_, _, _, _, csiz = struct.unpack_from(\n \">HHIIIIIIIIH\", siz\n )\n ssiz = [None] * csiz\n xrsiz = [None] * csiz\n yrsiz = [None] * csiz\n for i in range(csiz):\n ssiz[i], xrsiz[i], yrsiz[i] = struct.unpack_from(\">BBB\", siz, 36 + 3 * i)\n\n size = (xsiz - xosiz, ysiz - yosiz)\n if csiz == 1:\n if (yrsiz[0] & 0x7F) > 8:\n mode = \"I;16\"\n else:\n mode = \"L\"\n elif csiz == 2:\n mode = \"LA\"\n elif csiz == 3:\n mode = \"RGB\"\n elif csiz == 4:\n mode = \"RGBA\"\n else:\n mode = None\n\n return size, mode\n\n", + "url": "https://github.com/python-pillow/Pillow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 244, + "n_words": 115, + "vocab_size": 67, + "complexity": 7, + "nloc": 27, + "token_counts": 196, + "n_ast_nodes": 311, + "n_identifiers": 23, + "d_id": 69907, + "documentation": { + "docstring": "Parse the JPEG 2000 codestream to extract the size and component\n count from the SIZ marker segment, returning a PIL (size, mode) tuple.", + "n_words": 23, + "vocab_size": 21, + "n_whitespaces": 25, + "language": "en" + } + }, + { + "id": 203832, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/contrib/gis/db/backends/postgis/operations.py", + "file_name": "operations.py", + "fun_name": "_get_postgis_func", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def _get_postgis_func(self, func):\n \n # Close out the connection. See #9437.\n with self.connection.temporary_connection() as cursor:\n cursor.execute(\"SELECT %s()\" % func)\n return cursor.fetchone()[0]\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 64, + "n_words": 20, + "vocab_size": 20, + "complexity": 1, + "nloc": 4, + "token_counts": 36, + "n_ast_nodes": 66, + "n_identifiers": 8, + "d_id": 50549, + "documentation": { + "docstring": "\n Helper routine for calling PostGIS functions and returning their result.\n ", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 25, + "language": "en" + } + }, + { + "id": 73833, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/core/models/__init__.py", + "file_name": "__init__.py", + "fun_name": "can_publish_subpage", + "commit_message": "Reformat with black", + "code": "def can_publish_subpage(self):\n \n if not self.user.is_active:\n return False\n specific_class = self.page.specific_class\n if specific_class is None or not specific_class.creatable_subpage_models():\n return False\n\n return self.user.is_superuser or (\"publish\" in self.permissions)\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 82, + "n_words": 25, + "vocab_size": 18, + "complexity": 5, + "nloc": 7, + "token_counts": 51, + "n_ast_nodes": 86, + "n_identifiers": 9, + "d_id": 16134, + "documentation": { + "docstring": "\n Niggly special case for creating and publishing a page in one go.\n Differs from can_publish in that we want to be able to publish subpages of root, but not\n to be able to publish root itself. 
(Also, can_publish_subpage returns false if the page\n does not allow subpages at all.)\n ", + "n_words": 49, + "vocab_size": 39, + "n_whitespaces": 85, + "language": "en" + } + }, + { + "id": 244031, + "commit_id": "cac356380d505bf15587f07c0529218cc36b9652", + "repo": "mmdetection", + "path": "mmdet/core/bbox/match_costs/match_cost.py", + "file_name": "match_cost.py", + "fun_name": "binary_mask_dice_loss", + "commit_message": "[Feature] Add Maskformer to mmdet (#7212)\n\n* first commit\r\n\r\n* add README\r\n\r\n* move model description from config to readme\r\n\r\nadd description for binary_input\r\n\r\nadd description for dice loss\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nremove compatibility of pretrain in maskformer\r\n\r\n* update comments in maskformer_head\r\n\r\n* update docs format", + "code": "def binary_mask_dice_loss(self, mask_preds, gt_masks):\n \n mask_preds = mask_preds.flatten(1)\n gt_masks = gt_masks.flatten(1).float()\n numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks)\n denominator = mask_preds.sum(-1)[:, None] + gt_masks.sum(-1)[None, :]\n loss = 1 - (numerator + self.eps) / (denominator + self.eps)\n return loss\n", + "url": "https://github.com/open-mmlab/mmdetection.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 86, + "n_words": 37, + "vocab_size": 28, + "complexity": 1, + "nloc": 7, + "token_counts": 92, + "n_ast_nodes": 146, + "n_identifiers": 13, + "d_id": 70203, + "documentation": { + "docstring": "\n Args:\n mask_preds (Tensor): Mask prediction in shape (num_query, *).\n gt_masks (Tensor): Ground truth in shape (num_gt, *)\n store 0 or 1, 0 for negative class and 1 for\n positive class.\n\n Returns:\n Tensor: Dice cost matrix in shape (num_query, num_gt).\n ", + "n_words": 39, + "vocab_size": 31, + "n_whitespaces": 124, + "language": "en" + } + }, + { + "id": 60412, + "commit_id": "cc4d0564756ca067516f71718a3d135996525909", + "repo": "transferlearning", + "path": "code/deep/BJMMD/caffe/scripts/cpp_lint.py", + "file_name": "cpp_lint.py", + "fun_name": "_DropCommonSuffixes", + "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", + "code": "def _DropCommonSuffixes(filename):\n \n for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',\n 'inl.h', 'impl.h', 'internal.h'):\n if (filename.endswith(suffix) and len(filename) > len(suffix) and\n filename[-len(suffix) - 1] in ('-', '_')):\n return filename[:-len(suffix) - 1]\n return os.path.splitext(filename)[0]\n\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 64, + "n_words": 30, + "vocab_size": 25, + "complexity": 5, + "nloc": 7, + "token_counts": 84, + "n_ast_nodes": 143, + "n_identifiers": 8, + "d_id": 12140, + "documentation": { + "docstring": "Drops common suffixes like _test.cc or -inl.h from filename.\n\n For example:\n >>> _DropCommonSuffixes('foo/foo-inl.h')\n 'foo/foo'\n >>> _DropCommonSuffixes('foo/bar/foo.cc')\n 'foo/bar/foo'\n >>> _DropCommonSuffixes('foo/foo_internal.h')\n 'foo/foo'\n >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')\n 'foo/foo_unusualinternal'\n\n Args:\n filename: The input filename.\n\n Returns:\n The filename with the common suffix removed.\n ", + "n_words": 36, + "vocab_size": 29, + "n_whitespaces": 70, + "language": "en" + } + }, + { + "id": 206737, + "commit_id": 
"9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/utils/text.py", + "file_name": "text.py", + "fun_name": "_text_words", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def _text_words(self, length, truncate):\n \n words = self._wrapped.split()\n if len(words) > length:\n words = words[:length]\n return self.add_truncation_text(\" \".join(words), truncate)\n return \" \".join(words)\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 71, + "n_words": 21, + "vocab_size": 18, + "complexity": 2, + "nloc": 6, + "token_counts": 55, + "n_ast_nodes": 92, + "n_identifiers": 10, + "d_id": 51669, + "documentation": { + "docstring": "\n Truncate a string after a certain number of words.\n\n Strip newlines in the string.\n ", + "n_words": 14, + "vocab_size": 13, + "n_whitespaces": 36, + "language": "en" + } + }, + { + "id": 299641, + "commit_id": "2a9f043039dc60fc25bc14bab724419bedf746bb", + "repo": "core", + "path": "homeassistant/util/ulid.py", + "file_name": "ulid.py", + "fun_name": "ulid", + "commit_message": "Use ULID short format for context ids (#71119)", + "code": "def ulid() -> str:\n \n ulid_bytes = int(time.time() * 1000).to_bytes(6, byteorder=\"big\") + int(\n getrandbits(80)\n ).to_bytes(10, byteorder=\"big\")\n\n # This is base32 crockford encoding with the loop unrolled for performance\n #\n # This code is adapted from:\n # https://github.com/ahawker/ulid/blob/06289583e9de4286b4d80b4ad000d137816502ca/ulid/base32.py#L102\n #\n enc = \"0123456789ABCDEFGHJKMNPQRSTVWXYZ\"\n return (\n enc[(ulid_bytes[0] & 224) >> 5]\n + enc[ulid_bytes[0] & 31]\n + enc[(ulid_bytes[1] & 248) >> 3]\n + enc[((ulid_bytes[1] & 7) << 2) | ((ulid_bytes[2] & 192) >> 6)]\n + enc[((ulid_bytes[2] & 62) >> 1)]\n + enc[((ulid_bytes[2] & 1) << 4) | ((ulid_bytes[3] & 240) >> 4)]\n + enc[((ulid_bytes[3] & 15) << 1) | ((ulid_bytes[4] & 128) >> 7)]\n + enc[(ulid_bytes[4] & 124) >> 2]\n + enc[((ulid_bytes[4] & 3) << 3) | ((ulid_bytes[5] & 224) >> 5)]\n + enc[ulid_bytes[5] & 31]\n + enc[(ulid_bytes[6] & 248) >> 3]\n + enc[((ulid_bytes[6] & 7) << 2) | ((ulid_bytes[7] & 192) >> 6)]\n + enc[(ulid_bytes[7] & 62) >> 1]\n + enc[((ulid_bytes[7] & 1) << 4) | ((ulid_bytes[8] & 240) >> 4)]\n + enc[((ulid_bytes[8] & 15) << 1) | ((ulid_bytes[9] & 128) >> 7)]\n + enc[(ulid_bytes[9] & 124) >> 2]\n + enc[((ulid_bytes[9] & 3) << 3) | ((ulid_bytes[10] & 224) >> 5)]\n + enc[ulid_bytes[10] & 31]\n + enc[(ulid_bytes[11] & 248) >> 3]\n + enc[((ulid_bytes[11] & 7) << 2) | ((ulid_bytes[12] & 192) >> 6)]\n + enc[(ulid_bytes[12] & 62) >> 1]\n + enc[((ulid_bytes[12] & 1) << 4) | ((ulid_bytes[13] & 240) >> 4)]\n + enc[((ulid_bytes[13] & 15) << 1) | ((ulid_bytes[14] & 128) >> 7)]\n + enc[(ulid_bytes[14] & 124) >> 2]\n + enc[((ulid_bytes[14] & 3) << 3) | ((ulid_bytes[15] & 224) >> 5)]\n + enc[ulid_bytes[15] & 31]\n )\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 36, + "n_whitespaces": 484, + "n_words": 262, + "vocab_size": 99, + "complexity": 1, + "nloc": 49, + "token_counts": 614, + "n_ast_nodes": 856, + "n_identifiers": 9, + "d_id": 98556, + "documentation": { + "docstring": "Generate a ULID.\n\n This ulid should not be used for cryptographically secure\n operations.\n\n 01AN4Z07BY 79KA1307SR9X4MV3\n |----------| |----------------|\n Timestamp Randomness\n 48bits 80bits\n\n This string can be 
loaded directly with https://github.com/ahawker/ulid\n\n import homeassistant.util.ulid as ulid_util\n import ulid\n ulid.parse(ulid_util.ulid())\n ", + "n_words": 36, + "vocab_size": 32, + "n_whitespaces": 103, + "language": "en" + } + }, + { + "id": 272207, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/integration_test/gradient_checkpoint_test.py", + "file_name": "gradient_checkpoint_test.py", + "fun_name": "_train_with_recompute", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def _train_with_recompute(n_steps):\n \n img_dim, n_channels, batch_size = 256, 1, 4\n x, y = _get_dummy_data(img_dim, n_channels, batch_size)\n # This model is the same model as _get_big_cnn_model but split into 3 parts.\n models = _get_split_cnn_model(\n img_dim, n_channels, num_partitions=3, blocks_per_partition=2\n )\n model1, model2, model3 = models\n # Apply gradient checkpointing to the submodels using tf.recompute_grad.\n model1_re = tf.recompute_grad(model1)\n model2_re = tf.recompute_grad(model2)\n model3_re = tf.recompute_grad(model3)\n optimizer = optimizers.SGD()\n tr_vars = (\n model1.trainable_variables\n + model2.trainable_variables\n + model3.trainable_variables\n )\n losses = []\n for _ in range(n_steps):\n with tf.GradientTape() as tape:\n logits1 = model1_re(x)\n logits2 = model2_re(logits1)\n logits3 = model3_re(logits2)\n loss = _compute_loss(logits3, y)\n losses.append(loss)\n grads = tape.gradient(loss, tr_vars) # tr_vars\n optimizer.apply_gradients(zip(grads, tr_vars))\n del grads\n return losses\n\n\n@tf_test_utils.with_eager_op_as_function", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "@tf_test_utils.with_eager_op_as_function", + "n_ast_errors": 1, + "ast_levels": 13, + "n_whitespaces": 284, + "n_words": 110, + "vocab_size": 82, + "complexity": 2, + "nloc": 28, + "token_counts": 176, + "n_ast_nodes": 288, + "n_identifiers": 42, + "d_id": 80977, + "documentation": { + "docstring": "Trains a single large model with gradient checkpointing using tf.recompute_grad.", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 74807, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/documents/tests/test_admin_views.py", + "file_name": "test_admin_views.py", + "fun_name": "test_edit_post", + "commit_message": "Reformat with black", + "code": "def test_edit_post(self):\n \n # Send request\n response = self.client.post(\n reverse(\"wagtaildocs:edit_multiple\", args=(self.doc.id,)),\n {\n \"doc-%d-%s\" % (self.doc.id, field): data\n for field, data in self.edit_post_data.items()\n },\n )\n\n # Check response\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response[\"Content-Type\"], \"application/json\")\n\n # Check JSON\n response_json = json.loads(response.content.decode())\n self.assertIn(\"doc_id\", response_json)\n self.assertNotIn(\"form\", response_json)\n self.assertIn(\"success\", response_json)\n self.assertEqual(response_json[\"doc_id\"], self.doc.id)\n self.assertTrue(response_json[\"success\"])\n\n self.check_doc_after_edit()\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 214, + "n_words": 46, + "vocab_size": 38, + "complexity": 2, + "nloc": 17, + "token_counts": 147, + "n_ast_nodes": 246, + "n_identifiers": 24, + 
"d_id": 16323, + "documentation": { + "docstring": "\n This tests that a POST request to the edit view edits the document\n ", + "n_words": 13, + "vocab_size": 12, + "n_whitespaces": 28, + "language": "en" + } + }, + { + "id": 185337, + "commit_id": "7bca184192191689b8a7247c92392d6b238df3d7", + "repo": "textual", + "path": "src/textual/geometry.py", + "file_name": "geometry.py", + "fun_name": "clamped", + "commit_message": "tweak for colors", + "code": "def clamped(self) -> Offset:\n \n x, y = self\n return Offset(max(x, 0), max(y, 0))\n", + "url": "https://github.com/Textualize/textual.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 34, + "n_words": 13, + "vocab_size": 13, + "complexity": 1, + "nloc": 8, + "token_counts": 30, + "n_ast_nodes": 48, + "n_identifiers": 6, + "d_id": 44981, + "documentation": { + "docstring": "Ensure x and y are above zero.\n\n Returns:\n Offset: New offset.\n ", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 36, + "language": "en" + } + }, + { + "id": 195742, + "commit_id": "5373c833ee895fb95f791849c6082ceb698b8dcc", + "repo": "sympy", + "path": "bin/mailmap_check.py", + "file_name": "mailmap_check.py", + "fun_name": "make_authors_file_lines", + "commit_message": "maint: tweaks in mailmap_check.py", + "code": "def make_authors_file_lines(git_people):\n # define new lines for the file\n header = filldedent().lstrip()\n header_extra = f\"There are a total of {len(git_people)} authors.", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 29, + "n_words": 21, + "vocab_size": 20, + "complexity": 1, + "nloc": 15, + "token_counts": 53, + "n_ast_nodes": 108, + "n_identifiers": 11, + "d_id": 47395, + "documentation": { + "docstring": "\n All people who contributed to SymPy by sending at least a patch or\n more (in the order of the date of their first contribution), except\n those who explicitly didn't want to be mentioned. People with a * next\n to their names are not found in the metadata of the git history. This\n file is generated automatically by running `./bin/authors_update.py`.\n \n lines = header.splitlines()\n lines.append('')\n lines.append(header_extra)\n lines.append('')\n lines.extend(git_people)\n return lines\n\n", + "n_words": 68, + "vocab_size": 55, + "n_whitespaces": 129, + "language": "en" + } + }, + { + "id": 118532, + "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", + "repo": "streamlit", + "path": "lib/streamlit/__init__.py", + "file_name": "__init__.py", + "fun_name": "spinner", + "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", + "code": "def spinner(text=\"In progress...\"):\n \n import streamlit.legacy_caching.caching as legacy_caching\n import streamlit.caching as caching\n from streamlit.elements.utils import clean_text\n from streamlit.proto.Spinner_pb2 import Spinner as SpinnerProto\n\n # @st.cache optionally uses spinner for long-running computations.\n # Normally, streamlit warns the user when they call st functions\n # from within an @st.cache'd function. 
But we do *not* want to show\n # these warnings for spinner's message, so we create and mutate this\n # message delta within the \"suppress_cached_st_function_warning\"\n # context.\n with legacy_caching.suppress_cached_st_function_warning():\n with caching.suppress_cached_st_function_warning():\n message = empty()\n\n try:\n # Set the message 0.1 seconds in the future to avoid annoying\n # flickering if this spinner runs too quickly.\n DELAY_SECS = 0.1\n display_message = True\n display_message_lock = _threading.Lock()\n", + "url": "https://github.com/streamlit/streamlit.git", + "language": "Python", + "ast_errors": "def spinner(text=\"In progress...\"):\n \"\"\"Temporarily displays a message while executing a block of code.\n\n Parameters\n ----------\n text : str\n A message to display while executing that block\n\n Example\n -------\n\n >>> with st.spinner('Wait for it...'):\n >>> time.sleep(5)\n >>> st.success('Done!')\n\n \"\"\"\n import streamlit.legacy_caching.caching as legacy_caching\n import streamlit.caching as caching\n from streamlit.elements.utils import clean_text\n from streamlit.proto.Spinner_pb2 import Spinner as SpinnerProto\n\n # @st.cache optionally uses spinner for long-running computations.\n # Normally, streamlit warns the user when they call st functions\n # from within an @st.cache'd function. But we do *not* want to show\n # these warnings for spinner's message, so we create and mutate this\n # message delta within the \"suppress_cached_st_function_warning\"\n # context.\n with legacy_caching.suppress_cached_st_function_warning():\n with caching.suppress_cached_st_function_warning():\n message = empty()\n\n try:\n # Set the message 0.1 seconds in the future to avoid annoying\n # flickering if this spinner runs too quickly.\n DELAY_SECS = 0.1\n display_message = True\n display_message_lock = _threading.Lock()", + "n_ast_errors": 1, + "ast_levels": 11, + "n_whitespaces": 202, + "n_words": 110, + "vocab_size": 80, + "complexity": 3, + "nloc": 22, + "token_counts": 124, + "n_ast_nodes": 132, + "n_identifiers": 20, + "d_id": 26274, + "documentation": { + "docstring": "Temporarily displays a message while executing a block of code.\n\n Parameters\n ----------\n text : str\n A message to display while executing that block\n\n Example\n -------\n\n >>> with st.spinner('Wait for it...'):\n >>> time.sleep(5)\n >>> st.success('Done!')\n\n ", + "n_words": 34, + "vocab_size": 27, + "n_whitespaces": 72, + "language": "en" + } + }, + { + "id": 266109, + "commit_id": "a5308ea28e851a4ddb65a4e7ca2297b641e5891f", + "repo": "netbox", + "path": "netbox/netbox/staging.py", + "file_name": "staging.py", + "fun_name": "process_queue", + "commit_message": "Closes #10851: New staging mechanism (#10890)\n\n* WIP\r\n\r\n* Convert checkout() context manager to a class\r\n\r\n* Misc cleanup\r\n\r\n* Drop unique constraint from Change model\r\n\r\n* Extend staging tests\r\n\r\n* Misc cleanup\r\n\r\n* Incorporate M2M changes\r\n\r\n* Don't cancel wipe out creation records when an object is deleted\r\n\r\n* Rename Change to StagedChange\r\n\r\n* Add documentation for change staging", + "code": "def process_queue(self):\n \n if not self.queue:\n logger.debug(f\"No queued changes; aborting\")\n return\n logger.debug(f\"Processing {len(self.queue)} queued changes\")\n\n # Iterate through the in-memory queue, creating Change instances\n changes = []\n for key, change in self.queue.items():\n logger.debug(f' {key}: {change}')\n object_type, pk = key\n action, data = change\n\n 
changes.append(StagedChange(\n branch=self.branch,\n action=action,\n object_type=object_type,\n object_id=pk,\n data=data\n ))\n\n # Save all Change instances to the database\n StagedChange.objects.bulk_create(changes)\n\n #\n # Signal handlers\n #\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 281, + "n_words": 63, + "vocab_size": 52, + "complexity": 3, + "nloc": 18, + "token_counts": 98, + "n_ast_nodes": 183, + "n_identifiers": 20, + "d_id": 78296, + "documentation": { + "docstring": "\n Create Change instances for all actions stored in the queue.\n ", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 25, + "language": "en" + } + }, + { + "id": 206002, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/forms/models.py", + "file_name": "models.py", + "fun_name": "save", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def save(self, commit=True):\n \n if self.errors:\n raise ValueError(\n \"The %s could not be %s because the data didn't validate.\"\n % (\n self.instance._meta.object_name,\n \"created\" if self.instance._state.adding else \"changed\",\n )\n )\n if commit:\n # If committing, save the instance and the m2m data immediately.\n self.instance.save()\n self._save_m2m()\n else:\n # If not committing, add a method to the form to allow deferred\n # saving of m2m data.\n self.save_m2m = self._save_m2m\n return self.instance\n\n save.alters_data = True\n\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 279, + "n_words": 70, + "vocab_size": 54, + "complexity": 4, + "nloc": 15, + "token_counts": 71, + "n_ast_nodes": 130, + "n_identifiers": 13, + "d_id": 51322, + "documentation": { + "docstring": "\n Save this form's self.instance object if commit=True. Otherwise, add\n a save_m2m() method to the form which can be called after the instance\n is saved manually at a later time. 
Return the model instance.\n ", + "n_words": 33, + "vocab_size": 30, + "n_whitespaces": 62, + "language": "en" + } + }, + { + "id": 198419, + "commit_id": "bd9f607176c58dfba01e27c05c2b7d49ff97c901", + "repo": "sympy", + "path": "sympy/solvers/diophantine/diophantine.py", + "file_name": "diophantine.py", + "fun_name": "solve", + "commit_message": "Improve loop performance in solvers", + "code": "def solve(self, parameters=None, limit=None):\n self.pre_solve(parameters)\n\n coeff = self.coeff\n var = self.free_symbols\n\n if 1 in coeff:\n # negate coeff[] because input is of the form: ax + by + c == 0\n # but is used as: ax + by == -c\n c = -coeff[1]\n else:\n c = 0\n\n result = DiophantineSolutionSet(var, parameters=self.parameters)\n params = result.parameters\n\n if len(var) == 1:\n q, r = divmod(c, coeff[var[0]])\n if not r:\n result.add((q,))\n return result\n else:\n return result\n\n \n\n A = [coeff[v] for v in var]\n B = []\n if len(var) > 2:\n B.append(igcd(A[-2], A[-1]))\n A[-2] = A[-2] // B[0]\n A[-1] = A[-1] // B[0]\n for i in range(len(A) - 3, 0, -1):\n gcd = igcd(B[0], A[i])\n B[0] = B[0] // gcd\n A[i] = A[i] // gcd\n B.insert(0, gcd)\n B.append(A[-1])\n\n \n solutions = []\n for Ai, Bi in zip(A, B):\n tot_x, tot_y = [], []\n\n for j, arg in enumerate(Add.make_args(c)):\n if arg.is_Integer:\n # example: 5 -> k = 5\n k, p = arg, S.One\n pnew = params[0]\n else: # arg is a Mul or Symbol\n # example: 3*t_1 -> k = 3\n # example: t_0 -> k = 1\n k, p = arg.as_coeff_Mul()\n pnew = params[params.index(p) + 1]\n\n sol = sol_x, sol_y = base_solution_linear(k, Ai, Bi, pnew)\n\n if p is S.One:\n if None in sol:\n return result\n else:\n # convert a + b*pnew -> a*p + b*pnew\n if isinstance(sol_x, Add):\n sol_x = sol_x.args[0]*p + sol_x.args[1]\n if isinstance(sol_y, Add):\n sol_y = sol_y.args[0]*p + sol_y.args[1]\n\n tot_x.append(sol_x)\n tot_y.append(sol_y)\n\n solutions.append(Add(*tot_x))\n c = Add(*tot_y)\n\n solutions.append(c)\n result.add(solutions)\n return result\n\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 19, + "n_whitespaces": 1068, + "n_words": 246, + "vocab_size": 136, + "complexity": 14, + "nloc": 148, + "token_counts": 454, + "n_ast_nodes": 727, + "n_identifiers": 51, + "d_id": 48926, + "documentation": { + "docstring": "\n base_solution_linear() can solve diophantine equations of the form:\n\n a*x + b*y == c\n\n We break down multivariate linear diophantine equations into a\n series of bivariate linear diophantine equations which can then\n be solved individually by base_solution_linear().\n\n Consider the following:\n\n a_0*x_0 + a_1*x_1 + a_2*x_2 == c\n\n which can be re-written as:\n\n a_0*x_0 + g_0*y_0 == c\n\n where\n\n g_0 == gcd(a_1, a_2)\n\n and\n\n y == (a_1*x_1)/g_0 + (a_2*x_2)/g_0\n\n This leaves us with two binary linear diophantine equations.\n For the first equation:\n\n a == a_0\n b == g_0\n c == c\n\n For the second:\n\n a == a_1/g_0\n b == a_2/g_0\n c == the solution we find for y_0 in the first equation.\n\n The arrays A and B are the arrays of integers used for\n 'a' and 'b' in each of the n-1 bivariate equations we solve.\n \n Consider the trivariate linear equation:\n\n 4*x_0 + 6*x_1 + 3*x_2 == 2\n\n This can be re-written as:\n\n 4*x_0 + 3*y_0 == 2\n\n where\n\n y_0 == 2*x_1 + x_2\n (Note that gcd(3, 6) == 3)\n\n The complete integral solution to this equation is:\n\n x_0 == 2 + 3*t_0\n y_0 == -2 - 4*t_0\n\n where 't_0' is any integer.\n\n Now that we have a 
solution for 'x_0', find 'x_1' and 'x_2':\n\n 2*x_1 + x_2 == -2 - 4*t_0\n\n We can then solve for '-2' and '-4' independently,\n and combine the results:\n\n 2*x_1a + x_2a == -2\n x_1a == 0 + t_0\n x_2a == -2 - 2*t_0\n\n 2*x_1b + x_2b == -4*t_0\n x_1b == 0*t_0 + t_1\n x_2b == -4*t_0 - 2*t_1\n\n ==>\n\n x_1 == t_0 + t_1\n x_2 == -2 - 6*t_0 - 2*t_1\n\n where 't_0' and 't_1' are any integers.\n\n Note that:\n\n 4*(2 + 3*t_0) + 6*(t_0 + t_1) + 3*(-2 - 6*t_0 - 2*t_1) == 2\n\n for any integral values of 't_0', 't_1'; as required.\n\n This method is generalised for many variables, below.\n\n ", + "n_words": 307, + "vocab_size": 153, + "n_whitespaces": 695, + "language": "en" + } + }, + { + "id": 183771, + "commit_id": "fe151a7f25cfd7f1134ebafbddc7eeade1c18ccb", + "repo": "textual", + "path": "src/textual/driver.py", + "file_name": "driver.py", + "fun_name": "disable_bracketed_paste", + "commit_message": "Support for bracketed paste mode (#567)\n\n* Detecting bracketed paste, sending paste events\r\n\r\n* Bracketed pasting support in TextInput\r\n\r\n* Restore debugging conditional\r\n\r\n* Handle pasting of text in text-input, improve scrolling\r\n\r\n* Fix ordering of handling in parser for bracketed pastes\r\n\r\n* Docstrings\r\n\r\n* Add docstrings", + "code": "def disable_bracketed_paste(self) -> None:\n \n self.console.file.write(\"\\x1b[?2004l\")\n self.console.file.flush()\n", + "url": "https://github.com/Textualize/textual.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 27, + "n_words": 6, + "vocab_size": 6, + "complexity": 1, + "nloc": 5, + "token_counts": 27, + "n_ast_nodes": 50, + "n_identifiers": 6, + "d_id": 44330, + "documentation": { + "docstring": "Write the ANSI escape code `ESC[?2004l`, which\n disables bracketed paste mode.", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 17, + "language": "en" + } + }, + { + "id": 125331, + "commit_id": "ac1d21027da8a8c002cc7c28b8d1dc89c0d72fcf", + "repo": "ray", + "path": "python/ray/train/huggingface/huggingface_checkpoint.py", + "file_name": "huggingface_checkpoint.py", + "fun_name": "get_training_arguments", + "commit_message": "[AIR] Add framework-specific checkpoints (#26777)", + "code": "def get_training_arguments(self) -> transformers.training_args.TrainingArguments:\n \n with self.as_directory() as checkpoint_path:\n training_args_path = os.path.join(checkpoint_path, TRAINING_ARGS_NAME)\n if os.path.exists(training_args_path):\n with open(training_args_path, \"rb\") as f:\n training_args = torch.load(f, map_location=\"cpu\")\n else:\n training_args = None\n return training_args\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 132, + "n_words": 29, + "vocab_size": 23, + "complexity": 2, + "nloc": 10, + "token_counts": 72, + "n_ast_nodes": 126, + "n_identifiers": 18, + "d_id": 27836, + "documentation": { + "docstring": "Retrieve the training arguments stored in this checkpoint.", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 284458, + "commit_id": "33a041e5bf93ce93ab1a19adbc5ed74c2f1eb337", + "repo": "OpenBBTerminal", + "path": "openbb_terminal/stocks/tradinghours/bursa_model.py", + "file_name": "bursa_model.py", + "fun_name": "all_bursa", + "commit_message": "Trading hours stock feature (#1697)", + "code": "def all_bursa():\n \n path = os.path.join(os.path.dirname(__file__), \"data/bursa_open_hours.json\")\n 
bursa = pd.read_json(path) # , orient=\"index\")\n return bursa\n\n", + "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 27, + "n_words": 14, + "vocab_size": 12, + "complexity": 1, + "nloc": 4, + "token_counts": 34, + "n_ast_nodes": 60, + "n_identifiers": 9, + "d_id": 84728, + "documentation": { + "docstring": "Get all exchanges from dictionary\n\n Parameters\n __________\n\n Returns\n _______\n pd.DataFrame\n All exchanges\n ", + "n_words": 12, + "vocab_size": 11, + "n_whitespaces": 37, + "language": "en" + } + }, + { + "id": 206790, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/utils/version.py", + "file_name": "version.py", + "fun_name": "get_git_changeset", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def get_git_changeset():\n \n # Repository may not be found if __file__ is undefined, e.g. in a frozen\n # module.\n if \"__file__\" not in globals():\n return None\n repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n git_log = subprocess.run(\n \"git log --pretty=format:%ct --quiet -1 HEAD\",\n capture_output=True,\n shell=True,\n cwd=repo_dir,\n text=True,\n )\n timestamp = git_log.stdout\n tz = datetime.timezone.utc\n try:\n timestamp = datetime.datetime.fromtimestamp(int(timestamp), tz=tz)\n except ValueError:\n return None\n return timestamp.strftime(\"%Y%m%d%H%M%S\")\n\n\nversion_component_re = _lazy_re_compile(r\"(\\d+|[a-z]+|\\.)\")\n\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 153, + "n_words": 62, + "vocab_size": 49, + "complexity": 3, + "nloc": 18, + "token_counts": 107, + "n_ast_nodes": 188, + "n_identifiers": 27, + "d_id": 51708, + "documentation": { + "docstring": "Return a numeric identifier of the latest git changeset.\n\n The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.\n This value isn't guaranteed to be unique, but collisions are very unlikely,\n so it's sufficient for generating the development version numbers.\n ", + "n_words": 42, + "vocab_size": 38, + "n_whitespaces": 54, + "language": "en" + } + }, + { + "id": 177316, + "commit_id": "8a325d26aa7fdd3a72580c4720fa97f971bbefcb", + "repo": "networkx", + "path": "networkx/algorithms/tournament.py", + "file_name": "tournament.py", + "fun_name": "tournament_matrix", + "commit_message": "Use scipy.sparse array datastructure (#6037)\n\n* Use scipy.sparse array datastructure\r\n\r\n* Add reminder to rm wrapper when scipy adds creation fns.\r\n\r\n* Rm mention of np matrix from code comment.\r\n\r\n* Update networkx/algorithms/bipartite/matrix.py\r\n\r\nCo-authored-by: Stefan van der Walt \r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Stefan van der Walt ", + "code": "def tournament_matrix(G):\n r\n A = nx.adjacency_matrix(G)\n return A - A.T\n\n\n@not_implemented_for(\"undirected\")\n@not_implemented_for(\"multigraph\")", + "url": "https://github.com/networkx/networkx.git", + "language": "Python", + "ast_errors": "@not_implemented_for(\"undirected\")\n@not_implemented_for(\"multigraph\")", + "n_ast_errors": 1, + "ast_levels": 8, + "n_whitespaces": 18, + "n_words": 12, + "vocab_size": 11, + "complexity": 1, + "nloc": 38, + "token_counts": 21, + "n_ast_nodes": 56, + "n_identifiers": 7, + "d_id": 42336, + "documentation": { + "docstring": "Returns the 
tournament matrix for the given tournament graph.\n\n This function requires SciPy.\n\n The *tournament matrix* of a tournament graph with edge set *E* is\n the matrix *T* defined by\n\n .. math::\n\n T_{i j} =\n \\begin{cases}\n +1 & \\text{if } (i, j) \\in E \\\\\n -1 & \\text{if } (j, i) \\in E \\\\\n 0 & \\text{if } i == j.\n \\end{cases}\n\n An equivalent definition is `T = A - A^T`, where *A* is the\n adjacency matrix of the graph `G`.\n\n Parameters\n ----------\n G : NetworkX graph\n A directed graph representing a tournament.\n\n Returns\n -------\n SciPy sparse array\n The tournament matrix of the tournament graph `G`.\n\n Raises\n ------\n ImportError\n If SciPy is not available.\n\n ", + "n_words": 114, + "vocab_size": 77, + "n_whitespaces": 219, + "language": "en" + } + }, + { + "id": 244119, + "commit_id": "3f0f2a059743593fd07b629c261b609bd9a767e6", + "repo": "mmdetection", + "path": "mmdet/models/backbones/efficientnet.py", + "file_name": "efficientnet.py", + "fun_name": "model_scaling", + "commit_message": "[Feature] Support efficientnet in mmdetection. (#7514)\n\n* Initial implementation\r\n\r\n* Add missing import\r\n\r\n* Add MemoryEfficientSwishImplementation. Add docstrings\r\n\r\n* Add efficientnet2mmdet tool\r\n\r\n* Add config folder\r\n\r\n* Flake8\r\n\r\n* Flake8\r\n\r\n* Flake8\r\n\r\n* Fix config\r\n\r\n* Requested changes\r\n\r\n* docformatter\r\n\r\n* Update train config from https://github.com/google/automl/blob/master/efficientdet\r\n\r\n* Run pre-commit\r\n\r\n* Fix schedule\r\n\r\n* Set by_epoch=False in scheduler\r\n\r\n* Train 80 epochs\r\n\r\n* Remove duplicated arg\r\n\r\n* Update README.md\r\n\r\n* efficient3 efficient0\r\n\r\n* efficientNet imports\r\n\r\n* efficientNet\r\n\r\n* config edit path for eff3 and dropout for eff0\r\n\r\n* efficientnet review2\r\n\r\n* fix model_converter location and drop path\r\n\r\n* fix model converter and efficientnet import\r\n\r\n* register memoryefficietnswish\r\n\r\n* eff0, eff3\r\n\r\n* fix flake8 yapf isort\r\n\r\n* same padding in tensorflow and edit drop path rate\r\n\r\n* fix init of utils\r\n\r\n* Align mmdet utils with mmcls\r\n\r\n* Align mmdet.models.utils with mmcls\r\n\r\n* Use mmcls efficientnet backbone\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* Update metafile\r\n\r\nCo-authored-by: David de la Iglesia Castro \r\nCo-authored-by: David de la Iglesia Castro \r\nCo-authored-by: jiangyitong \r\nCo-authored-by: jiangyitong ", + "code": "def model_scaling(layer_setting, arch_setting):\n \n # scale width\n new_layer_setting = copy.deepcopy(layer_setting)\n for layer_cfg in new_layer_setting:\n for block_cfg in layer_cfg:\n block_cfg[1] = make_divisible(block_cfg[1] * arch_setting[0], 8)\n\n # scale depth\n split_layer_setting = [new_layer_setting[0]]\n for layer_cfg in new_layer_setting[1:-1]:\n tmp_index = [0]\n for i in range(len(layer_cfg) - 1):\n if layer_cfg[i + 1][1] != layer_cfg[i][1]:\n tmp_index.append(i + 1)\n tmp_index.append(len(layer_cfg))\n for i in range(len(tmp_index) - 1):\n split_layer_setting.append(layer_cfg[tmp_index[i]:tmp_index[i +\n 1]])\n split_layer_setting.append(new_layer_setting[-1])\n\n num_of_layers = [len(layer_cfg) for layer_cfg in split_layer_setting[1:-1]]\n new_layers = [\n int(math.ceil(arch_setting[1] * num)) for num in num_of_layers\n ]\n\n merge_layer_setting = [split_layer_setting[0]]\n for i, layer_cfg in enumerate(split_layer_setting[1:-1]):\n if new_layers[i] <= num_of_layers[i]:\n tmp_layer_cfg = layer_cfg[:new_layers[i]]\n else:\n tmp_layer_cfg = 
copy.deepcopy(layer_cfg) + [layer_cfg[-1]] * (\n new_layers[i] - num_of_layers[i])\n if tmp_layer_cfg[0][3] == 1 and i != 0:\n merge_layer_setting[-1] += tmp_layer_cfg.copy()\n else:\n merge_layer_setting.append(tmp_layer_cfg.copy())\n merge_layer_setting.append(split_layer_setting[-1])\n\n return merge_layer_setting\n\n\n@BACKBONES.register_module()", + "url": "https://github.com/open-mmlab/mmdetection.git", + "language": "Python", + "ast_errors": "@BACKBONES.register_module()", + "n_ast_errors": 1, + "ast_levels": 16, + "n_whitespaces": 415, + "n_words": 123, + "vocab_size": 78, + "complexity": 13, + "nloc": 33, + "token_counts": 325, + "n_ast_nodes": 510, + "n_identifiers": 26, + "d_id": 70249, + "documentation": { + "docstring": "Scaling operation to the layer's parameters according to the\n arch_setting.", + "n_words": 10, + "vocab_size": 8, + "n_whitespaces": 12, + "language": "en" + } + }, + { + "id": 119831, + "commit_id": "603bb3c5ca288674579211e64fa47c6b2b0fb7a6", + "repo": "jax", + "path": "jax/_src/numpy/polynomial.py", + "file_name": "polynomial.py", + "fun_name": "polyfit", + "commit_message": "lax_numpy: move poly functions into numpy.polynomial", + "code": "def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):\n _check_arraylike(\"polyfit\", x, y)\n deg = core.concrete_or_error(int, deg, \"deg must be int\")\n order = deg + 1\n # check arguments\n if deg < 0:\n raise ValueError(\"expected deg >= 0\")\n if x.ndim != 1:\n raise TypeError(\"expected 1D vector for x\")\n if x.size == 0:\n raise TypeError(\"expected non-empty vector for x\")\n if y.ndim < 1 or y.ndim > 2:\n raise TypeError(\"expected 1D or 2D array for y\")\n if x.shape[0] != y.shape[0]:\n raise TypeError(\"expected x and y to have same length\")\n\n # set rcond\n if rcond is None:\n rcond = len(x) * finfo(x.dtype).eps\n rcond = core.concrete_or_error(float, rcond, \"rcond must be float\")\n # set up least squares equation for powers of x\n lhs = vander(x, order)\n rhs = y\n\n # apply weighting\n if w is not None:\n _check_arraylike(\"polyfit\", w)\n w, = _promote_dtypes_inexact(w)\n if w.ndim != 1:\n raise TypeError(\"expected a 1-d array for weights\")\n if w.shape[0] != y.shape[0]:\n raise TypeError(\"expected w and y to have the same length\")\n lhs *= w[:, np.newaxis]\n if rhs.ndim == 2:\n rhs *= w[:, np.newaxis]\n else:\n rhs *= w\n\n # scale lhs to improve condition number and solve\n scale = sqrt((lhs*lhs).sum(axis=0))\n lhs /= scale[np.newaxis,:]\n c, resids, rank, s = linalg.lstsq(lhs, rhs, rcond)\n c = (c.T/scale).T # broadcast scale coefficients\n\n if full:\n return c, resids, rank, s, rcond\n elif cov:\n Vbase = linalg.inv(dot(lhs.T, lhs))\n Vbase /= outer(scale, scale)\n if cov == \"unscaled\":\n fac = 1\n else:\n if len(x) <= order:\n raise ValueError(\"the number of data points must exceed order \"\n \"to scale the covariance matrix\")\n fac = resids / (len(x) - order)\n fac = fac[0] #making np.array() of shape (1,) to int\n if y.ndim == 1:\n return c, Vbase * fac\n else:\n return c, Vbase[:, :, np.newaxis] * fac\n else:\n return c\n\n\n_POLY_DOC = \n\n@_wraps(np.poly, lax_description=_POLY_DOC)\n@jit", + "url": "https://github.com/google/jax.git", + "language": "Python", + "ast_errors": "@_wraps(np.poly, lax_description=_POLY_DOC)\n@jit", + "n_ast_errors": 1, + "ast_levels": 17, + "n_whitespaces": 463, + "n_words": 293, + "vocab_size": 155, + "complexity": 17, + "nloc": 54, + "token_counts": 424, + "n_ast_nodes": 700, + "n_identifiers": 50, + "d_id": 26696, + "documentation": { + 
"docstring": "\\\nThis differs from np.poly when an integer array is given.\nnp.poly returns a result with dtype float64 in this case.\njax returns a result with an inexact type, but not necessarily\nfloat64.\n\nThis also differs from np.poly when the input array strictly\ncontains pairs of complex conjugates, e.g. [1j, -1j, 1-1j, 1+1j].\nnp.poly returns an array with a real dtype in such cases.\njax returns an array with a complex dtype in such cases.\n", + "n_words": 75, + "vocab_size": 44, + "n_whitespaces": 66, + "language": "en" + } + }, + { + "id": 44419, + "commit_id": "6fc6edf6af7f676bfa54ff3a2e6e6d2edb938f2e", + "repo": "airflow", + "path": "airflow/models/taskinstance.py", + "file_name": "taskinstance.py", + "fun_name": "key", + "commit_message": "Make `airflow dags test` be able to execute Mapped Tasks (#21210)\n\n* Make `airflow dags test` be able to execute Mapped Tasks\r\n\r\nIn order to do this there were two steps required:\r\n\r\n- The BackfillJob needs to know about mapped tasks, both to expand them,\r\n and in order to update it's TI tracking\r\n- The DebugExecutor needed to \"unmap\" the mapped task to get the real\r\n operator back\r\n\r\nI was testing this with the following dag:\r\n\r\n```\r\nfrom airflow import DAG\r\nfrom airflow.decorators import task\r\nfrom airflow.operators.python import PythonOperator\r\nimport pendulum\r\n\r\n@task\r\ndef make_list():\r\n return list(map(lambda a: f'echo \"{a!r}\"', [1, 2, {'a': 'b'}]))\r\n\r\ndef consumer(*args):\r\n print(repr(args))\r\n\r\nwith DAG(dag_id='maptest', start_date=pendulum.DateTime(2022, 1, 18)) as dag:\r\n PythonOperator(task_id='consumer', python_callable=consumer).map(op_args=make_list())\r\n```\r\n\r\nIt can't \"unmap\" decorated operators successfully yet, so we're using\r\nold-school PythonOperator\r\n\r\nWe also just pass the whole value to the operator, not just the current\r\nmapping value(s)\r\n\r\n* Always have a `task_group` property on DAGNodes\r\n\r\nAnd since TaskGroup is a DAGNode, we don't need to store parent group\r\ndirectly anymore -- it'll already be stored\r\n\r\n* Add \"integation\" tests for running mapped tasks via BackfillJob\r\n\r\n* Only show \"Map Index\" in Backfill report when relevant\r\n\r\nCo-authored-by: Tzu-ping Chung ", + "code": "def key(self) -> TaskInstanceKey:\n \n return TaskInstanceKey(self.dag_id, self.task_id, self.run_id, self.try_number, self.map_index)\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 24, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 3, + "token_counts": 31, + "n_ast_nodes": 47, + "n_identifiers": 8, + "d_id": 8256, + "documentation": { + "docstring": "Returns a tuple that identifies the task instance uniquely", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 264086, + "commit_id": "f5925fa56f713e78ab5723de2a58195ca346847f", + "repo": "pyinstaller", + "path": "PyInstaller/building/utils.py", + "file_name": "utils.py", + "fun_name": "_check_guts_toc_mtime", + "commit_message": "building: cleanup remove pyc argument from _check_guts_toc_mtime\n\nThe only place where we use `_check_guts_toc_mtime` with `pyc`\nargument enabled is when checking the `Analysis.pure` TOC, and\nthe source names of those entries already point to source .py files.\nSo shortening the filenames by one character results in checking\nfor non-existant .p files.\n\nEven if an entry happened to point to a 
.pyc file, it is highly\nunlikely that there would be an adjacent .py file available,\nbecause under contemporary python 3 versions, that would hide the\n.pyc file from the loader.", + "code": "def _check_guts_toc_mtime(attr_name, old_toc, new_toc, last_build):\n \n for dest_name, src_name, typecode in old_toc:\n if misc.mtime(src_name) > last_build:\n logger.info(\"Building because %s changed\", src_name)\n return True\n return False\n\n", + "url": "https://github.com/pyinstaller/pyinstaller.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 62, + "n_words": 24, + "vocab_size": 23, + "complexity": 3, + "nloc": 6, + "token_counts": 43, + "n_ast_nodes": 67, + "n_identifiers": 12, + "d_id": 77596, + "documentation": { + "docstring": "\n Rebuild is required if mtimes of files listed in old TOC are newer than last_build.\n\n Use this for calculated/analysed values read from cache.\n ", + "n_words": 23, + "vocab_size": 23, + "n_whitespaces": 33, + "language": "en" + } + }, + { + "id": 258487, + "commit_id": "8b6b519caf3b3b9602958a859b4d3a7eb1d9eadd", + "repo": "scikit-learn", + "path": "sklearn/random_projection.py", + "file_name": "random_projection.py", + "fun_name": "fit", + "commit_message": "ENH Preserving dtype for np.float32 in RandomProjection (#22114)\n\nCo-authored-by: takoika <>\r\nCo-authored-by: Thomas J. Fan ", + "code": "def fit(self, X, y=None):\n \n X = self._validate_data(\n X, accept_sparse=[\"csr\", \"csc\"], dtype=[np.float64, np.float32]\n )\n\n n_samples, n_features = X.shape\n\n if self.n_components == \"auto\":\n self.n_components_ = johnson_lindenstrauss_min_dim(\n n_samples=n_samples, eps=self.eps\n )\n\n if self.n_components_ <= 0:\n raise ValueError(\n \"eps=%f and n_samples=%d lead to a target dimension of \"\n \"%d which is invalid\" % (self.eps, n_samples, self.n_components_)\n )\n\n elif self.n_components_ > n_features:\n raise ValueError(\n \"eps=%f and n_samples=%d lead to a target dimension of \"\n \"%d which is larger than the original space with \"\n \"n_features=%d\"\n % (self.eps, n_samples, self.n_components_, n_features)\n )\n else:\n if self.n_components <= 0:\n raise ValueError(\n \"n_components must be greater than 0, got %s\" % self.n_components\n )\n\n elif self.n_components > n_features:\n warnings.warn(\n \"The number of components is higher than the number of\"\n \" features: n_features < n_components (%s < %s).\"\n \"The dimensionality of the problem will not be reduced.\"\n % (n_features, self.n_components),\n DataDimensionalityWarning,\n )\n\n self.n_components_ = self.n_components\n\n # Generate a projection matrix of size [n_components, n_features]\n self.components_ = self._make_random_matrix(\n self.n_components_, n_features\n ).astype(X.dtype, copy=False)\n\n # Check contract\n assert self.components_.shape == (self.n_components_, n_features), (\n \"An error has occurred the self.components_ matrix has \"\n \" not the proper shape.\"\n )\n\n return self\n", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 760, + "n_words": 185, + "vocab_size": 109, + "complexity": 6, + "nloc": 43, + "token_counts": 220, + "n_ast_nodes": 357, + "n_identifiers": 25, + "d_id": 75247, + "documentation": { + "docstring": "Generate a sparse random projection matrix.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Training set: only the shape 
is used to find optimal random\n matrix dimensions based on the theory referenced in the\n afore mentioned papers.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n BaseRandomProjection class instance.\n ", + "n_words": 60, + "vocab_size": 53, + "n_whitespaces": 171, + "language": "en" + } + }, + { + "id": 196300, + "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", + "repo": "sympy", + "path": "sympy/geometry/polygon.py", + "file_name": "polygon.py", + "fun_name": "bisectors", + "commit_message": "Updated import locations", + "code": "def bisectors(self):\n \n # use lines containing sides so containment check during\n # intersection calculation can be avoided, thus reducing\n # the processing time for calculating the bisectors\n s = [Line(l) for l in self.sides]\n v = self.vertices\n c = self.incenter\n l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0])\n l2 = Segment(v[1], Line(v[1], c).intersection(s[2])[0])\n l3 = Segment(v[2], Line(v[2], c).intersection(s[0])[0])\n return {v[0]: l1, v[1]: l2, v[2]: l3}\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 139, + "n_words": 62, + "vocab_size": 53, + "complexity": 2, + "nloc": 8, + "token_counts": 143, + "n_ast_nodes": 213, + "n_identifiers": 15, + "d_id": 47800, + "documentation": { + "docstring": "The angle bisectors of the triangle.\n\n An angle bisector of a triangle is a straight line through a vertex\n which cuts the corresponding angle in half.\n\n Returns\n =======\n\n bisectors : dict\n Each key is a vertex (Point) and each value is the corresponding\n bisector (Segment).\n\n See Also\n ========\n\n sympy.geometry.point.Point, sympy.geometry.line.Segment\n\n Examples\n ========\n\n >>> from sympy import Point, Triangle, Segment\n >>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)\n >>> t = Triangle(p1, p2, p3)\n >>> from sympy import sqrt\n >>> t.bisectors()[p2] == Segment(Point(1, 0), Point(0, sqrt(2) - 1))\n True\n\n ", + "n_words": 91, + "vocab_size": 63, + "n_whitespaces": 232, + "language": "en" + } + }, + { + "id": 90548, + "commit_id": "a68089d62f514557ec38e3744593e20af484e5e2", + "repo": "sentry", + "path": "src/sentry/testutils/cases.py", + "file_name": "cases.py", + "fun_name": "get_success_response", + "commit_message": "ref(tests): Infer `status_code` from `method` (#34825)", + "code": "def get_success_response(self, *args, **params):\n \n status_code = params.pop(\"status_code\", None)\n\n if status_code and status_code >= 400:\n raise Exception(\"status_code must be < 400\")\n\n method = params.pop(\"method\", self.method).lower()\n\n response = self.get_response(*args, method=method, **params)\n\n if status_code:\n assert_status_code(response, status_code)\n elif method == \"get\":\n assert_status_code(response, status.HTTP_200_OK)\n # TODO(mgaeta): Add the other methods.\n # elif method == \"post\":\n # assert_status_code(response, status.HTTP_201_CREATED)\n elif method == \"put\":\n assert_status_code(response, status.HTTP_200_OK)\n elif method == \"delete\":\n assert_status_code(response, status.HTTP_204_NO_CONTENT)\n else:\n # TODO(mgaeta): Add other methods.\n assert_status_code(response, 200, 300)\n\n return response\n", + "url": "https://github.com/getsentry/sentry.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 255, + "n_words": 76, + "vocab_size": 47, + 
"complexity": 7, + "nloc": 17, + "token_counts": 126, + "n_ast_nodes": 212, + "n_identifiers": 15, + "d_id": 18674, + "documentation": { + "docstring": "\n Call `get_response` (see above) and assert the response's status code.\n\n :param params:\n * status_code: (Optional) Assert that the response's status code is\n a specific code. Omit to assert any successful status_code.\n :returns Response object\n ", + "n_words": 34, + "vocab_size": 29, + "n_whitespaces": 85, + "language": "en" + } + }, + { + "id": 133189, + "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", + "repo": "ray", + "path": "python/ray/util/multiprocessing/pool.py", + "file_name": "pool.py", + "fun_name": "starmap", + "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", + "code": "def starmap(self, func, iterable, chunksize=None):\n \n\n return self._map_async(\n func, iterable, chunksize=chunksize, unpack_args=True\n ).get()\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 44, + "n_words": 12, + "vocab_size": 10, + "complexity": 1, + "nloc": 4, + "token_counts": 35, + "n_ast_nodes": 52, + "n_identifiers": 8, + "d_id": 29954, + "documentation": { + "docstring": "Same as `map`, but unpacks each element of the iterable as the\n arguments to func like: [func(*args) for args in iterable].\n ", + "n_words": 21, + "vocab_size": 19, + "n_whitespaces": 35, + "language": "en" + } + }, + { + "id": 20359, + "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", + "repo": "pipenv", + "path": "pipenv/patched/notpip/_vendor/pygments/formatters/img.py", + "file_name": "img.py", + "fun_name": "_get_char_x", + "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", + "code": "def _get_char_x(self, linelength):\n \n return linelength + self.image_pad + self.line_number_width\n", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 23, + "n_words": 9, + "vocab_size": 8, + "complexity": 1, + "nloc": 2, + "token_counts": 18, + "n_ast_nodes": 30, + "n_identifiers": 5, + "d_id": 3344, + "documentation": { + "docstring": "\n Get the X coordinate of a character position.\n ", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 23, + "language": "en" + } + }, + { + "id": 9740, + "commit_id": "ac3bbcdf87b263f79d5e19cce173e6c709a15f9d", + "repo": "gensim", + "path": "gensim/test/test_word2vec.py", + "file_name": "test_word2vec.py", + "fun_name": "test_evaluate_word_analogies", + "commit_message": "streamlining most_similar_cosmul and evaluate_word_analogies (#2656)\n\n* streamlining most_similar_cosmul\r\n\r\n* Fix PR requested changes and add unit test\r\n\r\n* fix merge artifacts\r\n\r\nCo-authored-by: n3hrox \r\nCo-authored-by: Michael Penkov ", + "code": "def test_evaluate_word_analogies(self):\n \n model = 
word2vec.Word2Vec(LeeCorpus())\n score, sections = model.wv.evaluate_word_analogies(datapath('questions-words.txt'))\n score_cosmul, sections_cosmul = model.wv.evaluate_word_analogies(\n datapath('questions-words.txt'),\n similarity_function='most_similar_cosmul'\n )\n self.assertEqual(score, score_cosmul)\n self.assertEqual(sections, sections_cosmul)\n self.assertGreaterEqual(score, 0.0)\n self.assertLessEqual(score, 1.0)\n self.assertGreater(len(sections), 0)\n # Check that dict contains the right keys\n first_section = sections[0]\n self.assertIn('section', first_section)\n self.assertIn('correct', first_section)\n self.assertIn('incorrect', first_section)\n", + "url": "https://github.com/RaRe-Technologies/gensim.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 170, + "n_words": 43, + "vocab_size": 38, + "complexity": 1, + "nloc": 16, + "token_counts": 127, + "n_ast_nodes": 206, + "n_identifiers": 21, + "d_id": 1666, + "documentation": { + "docstring": "Test that evaluating analogies on KeyedVectors give sane results", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 73813, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/core/models/__init__.py", + "file_name": "__init__.py", + "fun_name": "get_default_locale", + "commit_message": "Reformat with black", + "code": "def get_default_locale(self):\n \n parent = self.get_parent()\n if parent is not None:\n return (\n parent.specific_class.objects.defer()\n .select_related(\"locale\")\n .get(id=parent.id)\n .locale\n )\n\n return super().get_default_locale()\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 129, + "n_words": 19, + "vocab_size": 17, + "complexity": 2, + "nloc": 10, + "token_counts": 55, + "n_ast_nodes": 94, + "n_identifiers": 12, + "d_id": 16124, + "documentation": { + "docstring": "\n Finds the default locale to use for this page.\n\n This will be called just before the initial save.\n ", + "n_words": 18, + "vocab_size": 17, + "n_whitespaces": 40, + "language": "en" + } + }, + { + "id": 220750, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/asyncio/streams.py", + "file_name": "streams.py", + "fun_name": "_wait_for_data", + "commit_message": "add python 3.10.4 for windows", + "code": "async def _wait_for_data(self, func_name):\n \n # StreamReader uses a future to link the protocol feed_data() method\n # to a read coroutine. Running two read coroutines at the same time\n # would have an unexpected behaviour. 
It would not possible to know\n # which coroutine would get the next data.\n if self._waiter is not None:\n raise RuntimeError(\n f'{func_name}() called while another coroutine is '\n f'already waiting for incoming data')\n\n assert not self._eof, '_wait_for_data after EOF'\n\n # Waiting for data while paused will make deadlock, so prevent it.\n # This is essential for readexactly(n) for case when n > self._limit.\n if self._paused:\n self._paused = False\n self._transport.resume_reading()\n\n self._waiter = self._loop.create_future()\n try:\n await self._waiter\n finally:\n self._waiter = None\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 289, + "n_words": 113, + "vocab_size": 85, + "complexity": 4, + "nloc": 14, + "token_counts": 72, + "n_ast_nodes": 133, + "n_identifiers": 11, + "d_id": 56110, + "documentation": { + "docstring": "Wait until feed_data() or feed_eof() is called.\n\n If stream was paused, automatically resume it.\n ", + "n_words": 14, + "vocab_size": 14, + "n_whitespaces": 28, + "language": "en" + } + }, + { + "id": 258531, + "commit_id": "432778464cbffc8ca675c1df786c31f8c23fc62c", + "repo": "scikit-learn", + "path": "sklearn/feature_selection/_univariate_selection.py", + "file_name": "_univariate_selection.py", + "fun_name": "chi2", + "commit_message": "[MRG] chi2: reduce memory footprint (#21837)\n\n* added sparse_output=True to LabelBinarizer in chi2\n\n* added changelog entry\n\n* Update sklearn/feature_selection/_univariate_selection.py\n\nCo-authored-by: Olivier Grisel \n\n* Update sklearn/feature_selection/_univariate_selection.py\n\nCo-authored-by: Olivier Grisel \n\nCo-authored-by: Wagner, Louis \nCo-authored-by: Olivier Grisel ", + "code": "def chi2(X, y):\n \n\n # XXX: we might want to do some of the following in logspace instead for\n # numerical stability.\n X = check_array(X, accept_sparse=\"csr\")\n if np.any((X.data if issparse(X) else X) < 0):\n raise ValueError(\"Input X must be non-negative.\")\n\n # Use a sparse representation for Y by default to reduce memory usage when\n # y has many unique classes.\n Y = LabelBinarizer(sparse_output=True).fit_transform(y)\n if Y.shape[1] == 1:\n Y = Y.toarray()\n Y = np.append(1 - Y, Y, axis=1)\n\n observed = safe_sparse_dot(Y.T, X) # n_classes * n_features\n\n if issparse(observed):\n # convert back to a dense array before calling _chisquare\n # XXX: could _chisquare be reimplement to accept sparse matrices for\n # cases where both n_classes and n_features are large (and X is\n # sparse)?\n observed = observed.toarray()\n\n feature_count = X.sum(axis=0).reshape(1, -1)\n class_prob = Y.mean(axis=0).reshape(1, -1)\n expected = np.dot(class_prob.T, feature_count)\n\n return _chisquare(observed, expected)\n\n", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 241, + "n_words": 139, + "vocab_size": 99, + "complexity": 5, + "nloc": 15, + "token_counts": 168, + "n_ast_nodes": 275, + "n_identifiers": 29, + "d_id": 75276, + "documentation": { + "docstring": "Compute chi-squared stats between each non-negative feature and class.\n\n This score can be used to select the n_features features with the\n highest values for the test chi-squared statistic from X, which must\n contain only non-negative features such as booleans or frequencies\n (e.g., term counts in document classification), relative to the 
classes.\n\n Recall that the chi-square test measures dependence between stochastic\n variables, so using this function \"weeds out\" the features that are the\n most likely to be independent of class and therefore irrelevant for\n classification.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Sample vectors.\n\n y : array-like of shape (n_samples,)\n Target vector (class labels).\n\n Returns\n -------\n chi2 : ndarray of shape (n_features,)\n Chi2 statistics for each feature.\n\n p_values : ndarray of shape (n_features,)\n P-values for each feature.\n\n Notes\n -----\n Complexity of this algorithm is O(n_classes * n_features).\n\n See Also\n --------\n f_classif : ANOVA F-value between label/feature for classification tasks.\n f_regression : F-value between label/feature for regression tasks.\n ", + "n_words": 167, + "vocab_size": 119, + "n_whitespaces": 270, + "language": "en" + } + }, + { + "id": 197297, + "commit_id": "65be461082dda54c8748922f9c29a19af1279fe1", + "repo": "sympy", + "path": "sympy/combinatorics/fp_groups.py", + "file_name": "fp_groups.py", + "fun_name": "equals", + "commit_message": "Remove abbreviations in documentation", + "code": "def equals(self, word1, word2):\n \n if self.reduce(word1*word2**-1) == self.identity:\n return True\n elif self._rewriting_system.is_confluent:\n return False\n return None\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 66, + "n_words": 16, + "vocab_size": 14, + "complexity": 3, + "nloc": 6, + "token_counts": 40, + "n_ast_nodes": 64, + "n_identifiers": 8, + "d_id": 48440, + "documentation": { + "docstring": "\n Compare `word1` and `word2` for equality in the group\n using the group's rewriting system. 
If the system is\n confluent, the returned answer is necessarily correct.\n (If it is not, `False` could be returned in some cases\n where in fact `word1 == word2`)\n\n ", + "n_words": 42, + "vocab_size": 34, + "n_whitespaces": 85, + "language": "en" + } + }, + { + "id": 206250, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/template/defaulttags.py", + "file_name": "defaulttags.py", + "fun_name": "now", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def now(parser, token):\n \n bits = token.split_contents()\n asvar = None\n if len(bits) == 4 and bits[-2] == \"as\":\n asvar = bits[-1]\n bits = bits[:-2]\n if len(bits) != 2:\n raise TemplateSyntaxError(\"'now' statement takes one argument\")\n format_string = bits[1][1:-1]\n return NowNode(format_string, asvar)\n\n\n@register.tag", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "@register.tag", + "n_ast_errors": 1, + "ast_levels": 11, + "n_whitespaces": 81, + "n_words": 40, + "vocab_size": 31, + "complexity": 4, + "nloc": 10, + "token_counts": 81, + "n_ast_nodes": 141, + "n_identifiers": 12, + "d_id": 51441, + "documentation": { + "docstring": "\n Display the date, formatted according to the given string.\n\n Use the same format as PHP's ``date()`` function; see https://php.net/date\n for all the possible values.\n\n Sample usage::\n\n It is {% now \"jS F Y H:i\" %}\n ", + "n_words": 35, + "vocab_size": 32, + "n_whitespaces": 58, + "language": "en" + } + }, + { + "id": 196310, + "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", + "repo": "sympy", + "path": "sympy/geometry/polygon.py", + "file_name": "polygon.py", + "fun_name": "exradii", + "commit_message": "Updated import locations", + "code": "def exradii(self):\n \n\n side = self.sides\n a = side[0].length\n b = side[1].length\n c = side[2].length\n s = (a+b+c)/2\n area = self.area\n exradii = {self.sides[0]: simplify(area/(s-a)),\n self.sides[1]: simplify(area/(s-b)),\n self.sides[2]: simplify(area/(s-c))}\n\n return exradii\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 129, + "n_words": 30, + "vocab_size": 23, + "complexity": 1, + "nloc": 11, + "token_counts": 110, + "n_ast_nodes": 169, + "n_identifiers": 11, + "d_id": 47810, + "documentation": { + "docstring": "The radius of excircles of a triangle.\n\n An excircle of the triangle is a circle lying outside the triangle,\n tangent to one of its sides and tangent to the extensions of the\n other two.\n\n Returns\n =======\n\n exradii : dict\n\n See Also\n ========\n\n sympy.geometry.polygon.Triangle.inradius\n\n Examples\n ========\n\n The exradius touches the side of the triangle to which it is keyed, e.g.\n the exradius touching side 2 is:\n\n >>> from sympy import Point, Triangle\n >>> p1, p2, p3 = Point(0, 0), Point(6, 0), Point(0, 2)\n >>> t = Triangle(p1, p2, p3)\n >>> t.exradii[t.sides[2]]\n -2 + sqrt(10)\n\n References\n ==========\n\n [1] http://mathworld.wolfram.com/Exradius.html\n [2] http://mathworld.wolfram.com/Excircles.html\n\n ", + "n_words": 99, + "vocab_size": 71, + "n_whitespaces": 260, + "language": "en" + } + }, + { + "id": 196398, + "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", + "repo": "sympy", + "path": "sympy/matrices/repmatrix.py", + "file_name": "repmatrix.py", + "fun_name": "col_swap", + "commit_message": "Moved imports to higher level", + "code": "def 
col_swap(self, i, j):\n \n for k in range(0, self.rows):\n self[k, i], self[k, j] = self[k, j], self[k, i]\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 43, + "n_words": 18, + "vocab_size": 15, + "complexity": 2, + "nloc": 3, + "token_counts": 49, + "n_ast_nodes": 69, + "n_identifiers": 7, + "d_id": 47898, + "documentation": { + "docstring": "Swap the two given columns of the matrix in-place.\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> M = Matrix([[1, 0], [1, 0]])\n >>> M\n Matrix([\n [1, 0],\n [1, 0]])\n >>> M.col_swap(0, 1)\n >>> M\n Matrix([\n [0, 1],\n [0, 1]])\n\n See Also\n ========\n\n col\n row_swap\n ", + "n_words": 45, + "vocab_size": 31, + "n_whitespaces": 171, + "language": "en" + } + }, + { + "id": 20769, + "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", + "repo": "pipenv", + "path": "pipenv/patched/notpip/_vendor/rich/live.py", + "file_name": "live.py", + "fun_name": "renderable", + "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", + "code": "def renderable(self) -> RenderableType:\n \n renderable = self.get_renderable()\n return Screen(renderable) if self._alt_screen else renderable\n", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 34, + "n_words": 13, + "vocab_size": 12, + "complexity": 2, + "nloc": 8, + "token_counts": 26, + "n_ast_nodes": 44, + "n_identifiers": 6, + "d_id": 3534, + "documentation": { + "docstring": "Get the renderable that is being displayed\n\n Returns:\n RenderableType: Displayed renderable.\n ", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 36, + "language": "en" + } + }, + { + "id": 75927, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/snippets/action_menu.py", + "file_name": "action_menu.py", + "fun_name": "get_base_snippet_action_menu_items", + "commit_message": "Reformat with black", + "code": "def get_base_snippet_action_menu_items(model):\n \n menu_items = [\n SaveMenuItem(order=0),\n DeleteMenuItem(order=10),\n ]\n\n for hook in hooks.get_hooks(\"register_snippet_action_menu_item\"):\n action_menu_item = hook(model)\n if action_menu_item:\n menu_items.append(action_menu_item)\n\n return menu_items\n\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 74, + "n_words": 20, + "vocab_size": 18, + "complexity": 3, + "nloc": 10, + "token_counts": 51, + "n_ast_nodes": 85, + "n_identifiers": 11, + "d_id": 16441, + "documentation": { + "docstring": "\n Retrieve the global list of menu items for the snippet action menu,\n which may then be customised on a per-request basis\n ", + "n_words": 21, + "vocab_size": 20, + "n_whitespaces": 31, + "language": "en" + } + }, + { + "id": 204387, + "commit_id": 
"9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/core/cache/backends/base.py", + "file_name": "base.py", + "fun_name": "has_key", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def has_key(self, key, version=None):\n \n return (\n self.get(key, self._missing_key, version=version) is not self._missing_key\n )\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 45, + "n_words": 13, + "vocab_size": 13, + "complexity": 1, + "nloc": 4, + "token_counts": 34, + "n_ast_nodes": 51, + "n_identifiers": 6, + "d_id": 50721, + "documentation": { + "docstring": "\n Return True if the key is in the cache and has not expired.\n ", + "n_words": 13, + "vocab_size": 12, + "n_whitespaces": 28, + "language": "en" + } + }, + { + "id": 319887, + "commit_id": "0fdd3d56f43c8442a0c9ecd3cad07a88137ff7de", + "repo": "paperless-ngx", + "path": ".github/scripts/cleanup-tags.py", + "file_name": "cleanup-tags.py", + "fun_name": "_read_all_pages", + "commit_message": "Changes the cleanup images workflow so it uses a OAuth token with the correct scope (GITHUB_TOKEN is not enough). Also prevents running if the token is not defined and generally does commenting/cleanups\"", + "code": "def _read_all_pages(self, endpoint):\n \n internal_data = []\n\n while True:\n resp = self._session.get(endpoint)\n if resp.status_code == 200:\n internal_data += resp.json()\n if \"next\" in resp.links:\n endpoint = resp.links[\"next\"][\"url\"]\n else:\n logger.debug(\"Exiting pagination loop\")\n break\n else:\n logger.warning(f\"Request to {endpoint} return HTTP {resp.status_code}\")\n break\n\n return internal_data\n", + "url": "https://github.com/paperless-ngx/paperless-ngx.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 233, + "n_words": 40, + "vocab_size": 32, + "complexity": 4, + "nloc": 15, + "token_counts": 78, + "n_ast_nodes": 149, + "n_identifiers": 13, + "d_id": 117014, + "documentation": { + "docstring": "\n Internal function to read all pages of an endpoint, utilizing the\n next.url until exhausted\n ", + "n_words": 14, + "vocab_size": 14, + "n_whitespaces": 36, + "language": "en" + } + }, + { + "id": 224209, + "commit_id": "dca7cbb43fcd6ea7c677c98ba585395b070d387b", + "repo": "mkdocs", + "path": "mkdocs/commands/build.py", + "file_name": "build.py", + "fun_name": "_build_template", + "commit_message": "Format code with `black -l100 --skip-string-normalization`", + "code": "def _build_template(name, template, files, config, nav):\n \n\n # Run `pre_template` plugin events.\n template = config['plugins'].run_event(\n 'pre_template', template, template_name=name, config=config\n )\n\n if utils.is_error_template(name):\n # Force absolute URLs in the nav of error pages and account for the\n # possibility that the docs root might be different than the server root.\n # See https://github.com/mkdocs/mkdocs/issues/77.\n # However, if site_url is not set, assume the docs root and server root\n # are the same. 
See https://github.com/mkdocs/mkdocs/issues/1598.\n base_url = urlsplit(config['site_url'] or '/').path\n else:\n base_url = utils.get_relative_url('.', name)\n\n context = get_context(nav, files, config, base_url=base_url)\n\n # Run `template_context` plugin events.\n context = config['plugins'].run_event(\n 'template_context', context, template_name=name, config=config\n )\n\n output = template.render(context)\n\n # Run `post_template` plugin events.\n output = config['plugins'].run_event('post_template', output, template_name=name, config=config)\n\n return output\n\n", + "url": "https://github.com/mkdocs/mkdocs.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 221, + "n_words": 116, + "vocab_size": 73, + "complexity": 3, + "nloc": 15, + "token_counts": 134, + "n_ast_nodes": 221, + "n_identifiers": 18, + "d_id": 57244, + "documentation": { + "docstring": "\n Return rendered output for given template as a string.\n ", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 16, + "language": "en" + } + }, + { + "id": 224944, + "commit_id": "df3739d51903ab56771ac071a05b5aa9cdf9e129", + "repo": "mkdocs", + "path": "mkdocs/utils/__init__.py", + "file_name": "__init__.py", + "fun_name": "get_build_datetime", + "commit_message": "Add a lot more type annotations, fix new type warnings (#2970)\n\n(including some behavior changes, assumed to be no-op)\r\n\r\nThis is based on auto-generated annotations from \"monkeytype\".", + "code": "def get_build_datetime() -> datetime:\n \n source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH')\n if source_date_epoch is None:\n return datetime.now(timezone.utc)\n\n return datetime.fromtimestamp(int(source_date_epoch), timezone.utc)\n\n", + "url": "https://github.com/mkdocs/mkdocs.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 35, + "n_words": 16, + "vocab_size": 14, + "complexity": 2, + "nloc": 11, + "token_counts": 45, + "n_ast_nodes": 76, + "n_identifiers": 11, + "d_id": 57432, + "documentation": { + "docstring": "\n Returns an aware datetime object.\n\n Support SOURCE_DATE_EPOCH environment variable for reproducible builds.\n See https://reproducible-builds.org/specs/source-date-epoch/\n ", + "n_words": 14, + "vocab_size": 14, + "n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 192301, + "commit_id": "c50d48845f7b1ca86d6a3b7f37a59be0ae11e36b", + "repo": "vision", + "path": "test/test_video_reader.py", + "file_name": "test_video_reader.py", + "fun_name": "test_read_video_from_file_audio_resampling", + "commit_message": "Improve test_video_reader (#5498)\n\n* Improve test_video_reader\r\n\r\n* Fix linter error", + "code": "def test_read_video_from_file_audio_resampling(self, test_video, samples):\n \n # video related\n width, height, min_dimension, max_dimension = 0, 0, 0, 0\n video_start_pts, video_end_pts = 0, -1\n video_timebase_num, video_timebase_den = 0, 1\n # audio related\n channels = 0\n audio_start_pts, audio_end_pts = 0, -1\n audio_timebase_num, audio_timebase_den = 0, 1\n\n full_path = os.path.join(VIDEO_DIR, test_video)\n\n tv_result = torch.ops.video_reader.read_video_from_file(\n full_path,\n SEEK_FRAME_MARGIN,\n 0, # getPtsOnly\n 1, # readVideoStream\n width,\n height,\n min_dimension,\n max_dimension,\n video_start_pts,\n video_end_pts,\n video_timebase_num,\n video_timebase_den,\n 1, # readAudioStream\n samples,\n channels,\n audio_start_pts,\n audio_end_pts,\n audio_timebase_num,\n audio_timebase_den,\n )\n 
(\n vframes,\n vframe_pts,\n vtimebase,\n vfps,\n vduration,\n aframes,\n aframe_pts,\n atimebase,\n asample_rate,\n aduration,\n ) = tv_result\n if aframes.numel() > 0:\n assert samples == asample_rate.item()\n assert 1 == aframes.size(1)\n # when audio stream is found\n duration = float(aframe_pts[-1]) * float(atimebase[0]) / float(atimebase[1])\n assert aframes.size(0) == approx(int(duration * asample_rate.item()), abs=0.1 * asample_rate.item())\n", + "url": "https://github.com/pytorch/vision.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 605, + "n_words": 123, + "vocab_size": 80, + "complexity": 2, + "nloc": 46, + "token_counts": 228, + "n_ast_nodes": 327, + "n_identifiers": 46, + "d_id": 46871, + "documentation": { + "docstring": "\n Test the case when decoder starts with a video file to decode frames, and\n audio waveform are resampled\n ", + "n_words": 18, + "vocab_size": 18, + "n_whitespaces": 40, + "language": "en" + } + }, + { + "id": 66950, + "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", + "repo": "erpnext", + "path": "erpnext/payroll/report/income_tax_deductions/income_tax_deductions.py", + "file_name": "income_tax_deductions.py", + "fun_name": "get_data", + "commit_message": "style: format code with black", + "code": "def get_data(filters):\n\n\tdata = []\n\n\tif erpnext.get_region() == \"India\":\n\t\temployee_pan_dict = frappe._dict(\n\t\t\tfrappe.db.sql()\n\t\t)\n\n\tcomponent_types = frappe.db.sql(\n\t\t\n\t)\n\n\tcomponent_types = [comp_type[0] for comp_type in component_types]\n\n\tif not len(component_types):\n\t\treturn []\n\n\tconditions = get_conditions(filters)\n\n\tentry = frappe.db.sql(\n\t\t\n\t\t% (conditions, \", \".join([\"%s\"] * len(component_types))),\n\t\ttuple(component_types),\n\t\tas_dict=1,\n\t)\n\n\tfor d in entry:\n\n\t\temployee = {\n\t\t\t\"employee\": d.employee,\n\t\t\t\"employee_name\": d.employee_name,\n\t\t\t\"it_comp\": d.salary_component,\n\t\t\t\"posting_date\": d.posting_date,\n\t\t\t# \"pan_number\": employee_pan_dict.get(d.employee),\n\t\t\t\"it_amount\": d.amount,\n\t\t\t\"gross_pay\": d.gross_pay,\n\t\t}\n\n\t\tif erpnext.get_region() == \"India\":\n\t\t\temployee[\"pan_number\"] = employee_pan_dict.get(d.employee)\n\n\t\tdata.append(employee)\n\n\treturn data\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 47, + "n_words": 78, + "vocab_size": 57, + "complexity": 6, + "nloc": 40, + "token_counts": 184, + "n_ast_nodes": 310, + "n_identifiers": 28, + "d_id": 14386, + "documentation": { + "docstring": " select employee, pan_number from `tabEmployee` select name from `tabSalary Component`\n\t\twhere is_income_tax_component = 1 select sal.employee, sal.employee_name, sal.posting_date, ded.salary_component, ded.amount,sal.gross_pay\n\t\tfrom `tabSalary Slip` sal, `tabSalary Detail` ded\n\t\twhere sal.name = ded.parent\n\t\tand ded.parentfield = 'deductions'\n\t\tand ded.parenttype = 'Salary Slip'\n\t\tand sal.docstatus = 1 %s\n\t\tand ded.salary_component in (%s)\n\t", + "n_words": 49, + "vocab_size": 34, + "n_whitespaces": 43, + "language": "en" + } + }, + { + "id": 206640, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/utils/encoding.py", + "file_name": "encoding.py", + "fun_name": "smart_str", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def smart_str(s, encoding=\"utf-8\", 
strings_only=False, errors=\"strict\"):\n \n if isinstance(s, Promise):\n # The input is the result of a gettext_lazy() call.\n return s\n return force_str(s, encoding, strings_only, errors)\n\n\n_PROTECTED_TYPES = (\n type(None),\n int,\n float,\n Decimal,\n datetime.datetime,\n datetime.date,\n datetime.time,\n)\n\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 7, + "n_whitespaces": 78, + "n_words": 36, + "vocab_size": 35, + "complexity": 2, + "nloc": 4, + "token_counts": 39, + "n_ast_nodes": 97, + "n_identifiers": 16, + "d_id": 51599, + "documentation": { + "docstring": "\n Return a string representing 's'. Treat bytestrings using the 'encoding'\n codec.\n\n If strings_only is True, don't convert (some) non-string-like objects.\n ", + "n_words": 20, + "vocab_size": 20, + "n_whitespaces": 33, + "language": "en" + } + }, + { + "id": 178818, + "commit_id": "613c31d98f20bdd9a4e5884c99826a06a3328438", + "repo": "Nuitka", + "path": "nuitka/Options.py", + "file_name": "Options.py", + "fun_name": "mayDisableConsoleWindow", + "commit_message": "Standalone: Added support for requiring modes\n\n* For wx on macOS, console must be disabled, avoid the trap.\n\n* For the PySide2, on macOS the --onefile must be used when the\n application bundle is built or else signing has issues.\n\n* Recommend to use new option --disable-console for PySide2, PySide6\n and wx on non-macOS", + "code": "def mayDisableConsoleWindow():\n \n\n # TODO: What about MSYS2?\n return isWin32Windows() or isMacOS()\n\n", + "url": "https://github.com/Nuitka/Nuitka.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 20, + "n_words": 11, + "vocab_size": 11, + "complexity": 2, + "nloc": 2, + "token_counts": 13, + "n_ast_nodes": 27, + "n_identifiers": 3, + "d_id": 42834, + "documentation": { + "docstring": ":returns: bool derived from platform support of disabling the console,", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 248480, + "commit_id": "7bc08f320147a1d80371eb13258328c88073fad0", + "repo": "synapse", + "path": "tests/test_mau.py", + "file_name": "test_mau.py", + "fun_name": "test_as_ignores_mau", + "commit_message": "Remove remaining bits of groups code. 
(#12936)\n\n* Update worker docs to remove group endpoints.\r\n* Removes an unused parameter to `ApplicationService`.\r\n* Break dependency between media repo and groups.\r\n* Avoid copying `m.room.related_groups` state events during room upgrades.", + "code": "def test_as_ignores_mau(self):\n \n\n # Create and sync so that the MAU counts get updated\n token1 = self.create_user(\"kermit1\")\n self.do_sync_for_user(token1)\n token2 = self.create_user(\"kermit2\")\n self.do_sync_for_user(token2)\n\n # check we're testing what we think we are: there should be two active users\n self.assertEqual(self.get_success(self.store.get_monthly_active_count()), 2)\n\n # We've created and activated two users, we shouldn't be able to\n # register new users\n with self.assertRaises(SynapseError) as cm:\n self.create_user(\"kermit3\")\n\n e = cm.exception\n self.assertEqual(e.code, 403)\n self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)\n\n # Cheekily add an application service that we use to register a new user\n # with.\n as_token = \"foobartoken\"\n self.store.services_cache.append(\n ApplicationService(\n token=as_token,\n id=\"SomeASID\",\n sender=\"@as_sender:test\",\n namespaces={\"users\": [{\"regex\": \"@as_*\", \"exclusive\": True}]},\n )\n )\n\n self.create_user(\"as_kermit4\", token=as_token, appservice=True)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 333, + "n_words": 100, + "vocab_size": 79, + "complexity": 1, + "nloc": 22, + "token_counts": 163, + "n_ast_nodes": 272, + "n_identifiers": 28, + "d_id": 72300, + "documentation": { + "docstring": "Test that application services can still create users when the MAU\n limit has been reached. This only works when application service\n user ip tracking is disabled.\n ", + "n_words": 26, + "vocab_size": 24, + "n_whitespaces": 47, + "language": "en" + } + }, + { + "id": 86878, + "commit_id": "941184cd24186324fd9f7f304b7f713041834726", + "repo": "sentry", + "path": "src/sentry/region_to_control/producer.py", + "file_name": "producer.py", + "fun_name": "get_region_to_control_producer", + "commit_message": "chore(hybrid-cloud): AuditLogEntry is a control silo model now (#39890)\n\nIn the control silo, creating an audit log entry writes to the db\r\ndirectly, whilst in region silo mode creating an audit log entry will\r\ninstead push to a new kafka producer that consumes into the control silo\r\nasynchronously.", + "code": "def get_region_to_control_producer() -> KafkaProducer:\n \n global _publisher\n if _publisher is None:\n config = settings.KAFKA_TOPICS.get(settings.KAFKA_REGION_TO_CONTROL)\n _publisher = KafkaProducer(\n kafka_config.get_kafka_producer_cluster_options(config[\"cluster\"])\n )\n", + "url": "https://github.com/getsentry/sentry.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 59, + "n_words": 18, + "vocab_size": 15, + "complexity": 2, + "nloc": 14, + "token_counts": 48, + "n_ast_nodes": 69, + "n_identifiers": 10, + "d_id": 18181, + "documentation": { + "docstring": "\n Creates, if necessary, an arroyo.KafkaProducer client configured for region to control communication and returns\n it, caching it for future calls. 
Installs an exit handler to close the worker thread processes.\n ", + "n_words": 30, + "vocab_size": 27, + "n_whitespaces": 41, + "language": "en" + } + }, + { + "id": 240862, + "commit_id": "5dc67fa7a7314cab97d4c96a30fdf4c5661c9039", + "repo": "plotly.py", + "path": "packages/python/plotly/plotly/basedatatypes.py", + "file_name": "basedatatypes.py", + "fun_name": "get_subplot", + "commit_message": "fix subplot imports", + "code": "def get_subplot(self, row, col, secondary_y=False):\n \n from plotly._subplots import _get_grid_subplot\n\n return _get_grid_subplot(self, row, col, secondary_y)\n\n # Child property operations\n # -------------------------", + "url": "https://github.com/plotly/plotly.py.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 7, + "n_whitespaces": 47, + "n_words": 20, + "vocab_size": 17, + "complexity": 1, + "nloc": 3, + "token_counts": 31, + "n_ast_nodes": 47, + "n_identifiers": 8, + "d_id": 68972, + "documentation": { + "docstring": "\n Return an object representing the subplot at the specified row\n and column. May only be used on Figures created using\n plotly.tools.make_subplots\n\n Parameters\n ----------\n row: int\n 1-based index of subplot row\n col: int\n 1-based index of subplot column\n secondary_y: bool\n If True, select the subplot that consists of the x-axis and the\n secondary y-axis at the specified row/col. Only valid if the\n subplot at row/col is an 2D cartesian subplot that was created\n with a secondary y-axis. See the docstring for the specs argument\n to make_subplots for more info on creating a subplot with a\n secondary y-axis.\n Returns\n -------\n subplot\n * None: if subplot is empty\n * plotly.graph_objs.layout.Scene: if subplot type is 'scene'\n * plotly.graph_objs.layout.Polar: if subplot type is 'polar'\n * plotly.graph_objs.layout.Ternary: if subplot type is 'ternary'\n * plotly.graph_objs.layout.Mapbox: if subplot type is 'ternary'\n * SubplotDomain namedtuple with `x` and `y` fields:\n if subplot type is 'domain'.\n - x: length 2 list of the subplot start and stop width\n - y: length 2 list of the subplot start and stop height\n * SubplotXY namedtuple with `xaxis` and `yaxis` fields:\n if subplot type is 'xy'.\n - xaxis: plotly.graph_objs.layout.XAxis instance for subplot\n - yaxis: plotly.graph_objs.layout.YAxis instance for subplot\n ", + "n_words": 195, + "vocab_size": 99, + "n_whitespaces": 533, + "language": "en" + } + }, + { + "id": 199152, + "commit_id": "e0aaa724190c49f2725bb7880eddd13ce4fef4b7", + "repo": "sympy", + "path": "sympy/polys/matrices/linsolve.py", + "file_name": "linsolve.py", + "fun_name": "_linear_eq_to_dict", + "commit_message": "more efficient coefficient extraction", + "code": "def _linear_eq_to_dict(eqs, syms):\n \n coeffs = []\n ind = []\n symset = set(syms)\n for i, e in enumerate(eqs):\n c, d = _lin_eq2dict(e, symset)\n coeffs.append(d)\n ind.append(c)\n return coeffs, ind\n\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 66, + "n_words": 27, + "vocab_size": 22, + "complexity": 2, + "nloc": 9, + "token_counts": 58, + "n_ast_nodes": 95, + "n_identifiers": 14, + "d_id": 49169, + "documentation": { + "docstring": "Convert a system Expr/Eq equations into dict form, returning\n the coefficient dictionaries and a list of syms-independent terms\n from each expression in ``eqs```.\n\n Examples\n ========\n\n >>> from sympy.polys.matrices.linsolve import 
_linear_eq_to_dict\n >>> from sympy.abc import x\n >>> _linear_eq_to_dict([2*x + 3], {x})\n ([{x: 2}], [3])\n ", + "n_words": 43, + "vocab_size": 37, + "n_whitespaces": 70, + "language": "en" + } + }, + { + "id": 136916, + "commit_id": "b52a81b3de6f4b7015c6694049d094f2964e1c96", + "repo": "ray", + "path": "rllib/env/wrappers/model_vector_env.py", + "file_name": "model_vector_env.py", + "fun_name": "vector_reset", + "commit_message": "[RLlib] Preparation for gymnasium/gym0.26 upgrade: Deprecate `horizon` and `soft_horizon` settings. (#30583)", + "code": "def vector_reset(self):\n \n self.cur_obs = [e.reset() for e in self.envs]\n self._timesteps = [0 for _ in range(self.num_envs)]\n return self.cur_obs\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 46, + "n_words": 18, + "vocab_size": 14, + "complexity": 3, + "nloc": 4, + "token_counts": 43, + "n_ast_nodes": 69, + "n_identifiers": 10, + "d_id": 31023, + "documentation": { + "docstring": "Override parent to store actual env obs for upcoming predictions.", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 100610, + "commit_id": "60291d49c4da1cd260fbc0b04aa6a312eedfefbb", + "repo": "faceswap", + "path": "plugins/convert/writer/_base.py", + "file_name": "_base.py", + "fun_name": "cache_frame", + "commit_message": "ffmpeg writer: Create new filename if output pre-exists", + "code": "def cache_frame(self, filename, image) -> None:\n \n frame_no = int(re.search(self.re_search, filename).group())\n self.cache[frame_no] = image\n logger.trace(\"Added to cache. Frame no: %s\", frame_no)\n logger.trace(\"Current cache: %s\", sorted(self.cache.keys()))\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 59, + "n_words": 24, + "vocab_size": 22, + "complexity": 1, + "nloc": 17, + "token_counts": 64, + "n_ast_nodes": 105, + "n_identifiers": 15, + "d_id": 20072, + "documentation": { + "docstring": " Add the incoming converted frame to the cache ready for writing out.\n\n Used for ffmpeg and gif writers to ensure that the frames are written out in the correct\n order.\n\n Parameters\n ----------\n filename: str\n The filename of the incoming frame, where the frame index can be extracted from\n image: class:`numpy.ndarray`\n The converted frame corresponding to the given filename\n ", + "n_words": 58, + "vocab_size": 43, + "n_whitespaces": 130, + "language": "en" + } + }, + { + "id": 64335, + "commit_id": "b68a99675d12a1ffbda538ee07a2020ba66fb3cc", + "repo": "erpnext", + "path": "erpnext/__init__.py", + "file_name": "__init__.py", + "fun_name": "allow_regional", + "commit_message": "fix: allow `regional_overrides` hook to be set in subsequent apps", + "code": "def allow_regional(fn):\n\t\n\n\tdef caller(*args, **kwargs):\n\t\toverrides = frappe.get_hooks(\"regional_overrides\", {}).get(get_region())\n\t\tfunction_path = f\"{inspect.getmodule(fn).__name__}.{fn.__name__}\"\n\n\t\tif not overrides or function_path not in overrides:\n\t\t\treturn fn(*args, **kwargs)\n\n\t\t# Priority given to last installed app\n\t\treturn frappe.get_attr(overrides[function_path][-1])(*args, **kwargs)\n\n\treturn caller\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 26, + "n_words": 35, + "vocab_size": 
27, + "complexity": 1, + "nloc": 3, + "token_counts": 10, + "n_ast_nodes": 152, + "n_identifiers": 15, + "d_id": 13605, + "documentation": { + "docstring": "Decorator to make a function regionally overridable\n\n\tExample:\n\t@erpnext.allow_regional\n\tdef myfunction():\n\t pass", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 243725, + "commit_id": "2ae55ccbdad9c842929fb238ea1eb81d1f999024", + "repo": "Pillow", + "path": "src/PIL/Image.py", + "file_name": "Image.py", + "fun_name": "getchannel", + "commit_message": "Improve exception traceback readability", + "code": "def getchannel(self, channel):\n \n self.load()\n\n if isinstance(channel, str):\n try:\n channel = self.getbands().index(channel)\n except ValueError as e:\n msg = f'The image has no channel \"{channel}\"'\n raise ValueError(msg) from e\n\n return self._new(self.im.getband(channel))\n", + "url": "https://github.com/python-pillow/Pillow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 124, + "n_words": 29, + "vocab_size": 27, + "complexity": 3, + "nloc": 9, + "token_counts": 65, + "n_ast_nodes": 112, + "n_identifiers": 14, + "d_id": 70094, + "documentation": { + "docstring": "\n Returns an image containing a single channel of the source image.\n\n :param channel: What channel to return. Could be index\n (0 for \"R\" channel of \"RGB\") or channel name\n (\"A\" for alpha channel of \"RGBA\").\n :returns: An image in \"L\" mode.\n\n .. versionadded:: 4.3.0\n ", + "n_words": 44, + "vocab_size": 36, + "n_whitespaces": 98, + "language": "en" + } + }, + { + "id": 100872, + "commit_id": "94c3dcff7ebd02a5a5758f33a3eb2bfc66282117", + "repo": "faceswap", + "path": "plugins/train/model/_base/model.py", + "file_name": "model.py", + "fun_name": "_update_legacy_config", + "commit_message": "Training updates\n - Add multiple selected loss functions\n - Unlock loss as a model configuration\n - Phaze-A remove encoder scaling max xap", + "code": "def _update_legacy_config(self) -> bool:\n \n logger.debug(\"Checking for legacy state file update\")\n priors = [\"dssim_loss\", \"mask_type\", \"mask_type\", \"l2_reg_term\"]\n new_items = [\"loss_function\", \"learn_mask\", \"mask_type\", \"loss_function_2\"]\n updated = False\n for old, new in zip(priors, new_items):\n if old not in self._config:\n logger.debug(\"Legacy item '%s' not in config. Skipping update\", old)\n continue\n\n # dssim_loss > loss_function\n if old == \"dssim_loss\":\n self._config[new] = \"ssim\" if self._config[old] else \"mae\"\n del self._config[old]\n updated = True\n logger.info(\"Updated config from legacy dssim format. New config loss \"\n \"function: '%s'\", self._config[new])\n continue\n\n # Add learn mask option and set to True if model has \"penalized_mask_loss\" specified\n if old == \"mask_type\" and new == \"learn_mask\" and new not in self._config:\n self._config[new] = self._config[\"mask_type\"] is not None\n updated = True\n logger.info(\"Added new 'learn_mask' config item for this model. 
Value set to: %s\",\n self._config[new])\n continue\n\n # Replace removed masks with most similar equivalent\n if old == \"mask_type\" and new == \"mask_type\" and self._config[old] in (\"facehull\",\n \"dfl_full\"):\n old_mask = self._config[old]\n self._config[new] = \"components\"\n updated = True\n logger.info(\"Updated 'mask_type' from '%s' to '%s' for this model\",\n old_mask, self._config[new])\n\n # Replace l2_reg_term with the correct loss_2_function and update the value of\n # loss_2_weight\n if old == \"l2_reg_term\":\n self._config[new] = \"mse\"\n self._config[\"loss_weight_2\"] = self._config[old]\n del self._config[old]\n updated = True\n logger.info(\"Updated config from legacy 'l2_reg_term' to 'loss_function_2'\")\n\n logger.debug(\"State file updated for legacy config: %s\", updated)\n return updated\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 846, + "n_words": 217, + "vocab_size": 116, + "complexity": 12, + "nloc": 60, + "token_counts": 272, + "n_ast_nodes": 471, + "n_identifiers": 14, + "d_id": 20323, + "documentation": { + "docstring": " Legacy updates for new config additions.\n\n When new config items are added to the Faceswap code, existing model state files need to be\n updated to handle these new items.\n\n Current existing legacy update items:\n\n * loss - If old `dssim_loss` is ``true`` set new `loss_function` to `ssim` otherwise\n set it to `mae`. Remove old `dssim_loss` item\n\n * l2_reg_term - If this exists, set loss_function_2 to ``mse`` and loss_weight_2 to\n the value held in the old ``l2_reg_term`` item\n\n * masks - If `learn_mask` does not exist then it is set to ``True`` if `mask_type` is\n not ``None`` otherwise it is set to ``False``.\n\n * masks type - Replace removed masks 'dfl_full' and 'facehull' with `components` mask\n\n Returns\n -------\n bool\n ``True`` if legacy items exist and state file has been updated, otherwise ``False``\n ", + "n_words": 131, + "vocab_size": 82, + "n_whitespaces": 269, + "language": "en" + } + }, + { + "id": 102634, + "commit_id": "89f15f591cc3cc3e8ae40e95ffc802f7f2561ece", + "repo": "chia-blockchain", + "path": "chia/types/spend_bundle.py", + "file_name": "spend_bundle.py", + "fun_name": "get_memos", + "commit_message": "Merge standalone wallet into main (#9793)\n\n* wallet changes from pac\r\n\r\n* cat changes\r\n\r\n* pool tests\r\n\r\n* pooling tests passing\r\n\r\n* offers\r\n\r\n* lint\r\n\r\n* mempool_mode\r\n\r\n* black\r\n\r\n* linting\r\n\r\n* workflow files\r\n\r\n* flake8\r\n\r\n* more cleanup\r\n\r\n* renamed\r\n\r\n* remove obsolete test, don't cast announcement\r\n\r\n* memos are not only bytes32\r\n\r\n* trade renames\r\n\r\n* fix rpcs, block_record\r\n\r\n* wallet rpc, recompile settlement clvm\r\n\r\n* key derivation\r\n\r\n* clvm tests\r\n\r\n* lgtm issues and wallet peers\r\n\r\n* stash\r\n\r\n* rename\r\n\r\n* mypy linting\r\n\r\n* flake8\r\n\r\n* bad initializer\r\n\r\n* flaky tests\r\n\r\n* Make CAT wallets only create on verified hints (#9651)\r\n\r\n* fix clvm tests\r\n\r\n* return to log lvl warn\r\n\r\n* check puzzle unhardened\r\n\r\n* public key, not bytes. 
api caching change\r\n\r\n* precommit changes\r\n\r\n* remove unused import\r\n\r\n* mypy ci file, tests\r\n\r\n* ensure balance before creating a tx\r\n\r\n* Remove CAT logic from full node test (#9741)\r\n\r\n* Add confirmations and sleeps for wallet (#9742)\r\n\r\n* use pool executor\r\n\r\n* rever merge mistakes/cleanup\r\n\r\n* Fix trade test flakiness (#9751)\r\n\r\n* remove precommit\r\n\r\n* older version of black\r\n\r\n* lint only in super linter\r\n\r\n* Make announcements in RPC be objects instead of bytes (#9752)\r\n\r\n* Make announcements in RPC be objects instead of bytes\r\n\r\n* Lint\r\n\r\n* misc hint'ish cleanup (#9753)\r\n\r\n* misc hint'ish cleanup\r\n\r\n* unremove some ci bits\r\n\r\n* Use main cached_bls.py\r\n\r\n* Fix bad merge in main_pac (#9774)\r\n\r\n* Fix bad merge at 71da0487b9cd5564453ec24b76f1ac773c272b75\r\n\r\n* Remove unused ignores\r\n\r\n* more unused ignores\r\n\r\n* Fix bad merge at 3b143e705057d6c14e2fb3e00078aceff0552d7e\r\n\r\n* One more byte32.from_hexstr\r\n\r\n* Remove obsolete test\r\n\r\n* remove commented out\r\n\r\n* remove duplicate payment object\r\n\r\n* remove long sync\r\n\r\n* remove unused test, noise\r\n\r\n* memos type\r\n\r\n* bytes32\r\n\r\n* make it clear it's a single state at a time\r\n\r\n* copy over asset ids from pacr\r\n\r\n* file endl linter\r\n\r\n* Update chia/server/ws_connection.py\r\n\r\nCo-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com>\r\n\r\nCo-authored-by: Matt Hauff \r\nCo-authored-by: Kyle Altendorf \r\nCo-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com>", + "code": "def get_memos(self) -> Dict[bytes32, List[bytes]]:\n \n memos: Dict[bytes32, List[bytes]] = {}\n for coin_spend in self.coin_spends:\n result = Program.from_bytes(bytes(coin_spend.puzzle_reveal)).run(\n Program.from_bytes(bytes(coin_spend.solution))\n )\n for condition in result.as_python():\n if condition[0] == ConditionOpcode.CREATE_COIN and len(condition) >= 4:\n # If only 3 elements (opcode + 2 args), there is no memo, this is ph, amount\n coin_added = Coin(coin_spend.coin.name(), bytes32(condition[1]), int_from_bytes(condition[2]))\n if type(condition[3]) != list:\n # If it's not a list, it's not the correct format\n continue\n memos[coin_added.name()] = condition[3]\n return memos\n\n # Note that `coin_spends` used to have the bad name `coin_solutions`.\n # Some API still expects this name. For now, we accept both names.\n #\n # TODO: continue this deprecation. Eventually, all code below here should be removed.\n # 1. set `exclude_modern_keys` to `False` (and manually set to `True` where necessary)\n # 2. set `include_legacy_keys` to `False` (and manually set to `False` where necessary)\n # 3. remove all references to `include_legacy_keys=True`\n # 4. remove all code below this point\n", + "url": "https://github.com/Chia-Network/chia-blockchain.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 394, + "n_words": 153, + "vocab_size": 109, + "complexity": 6, + "nloc": 18, + "token_counts": 146, + "n_ast_nodes": 235, + "n_identifiers": 27, + "d_id": 21558, + "documentation": { + "docstring": "\n Retrieves the memos for additions in this spend_bundle, which are formatted as a list in the 3rd parameter of\n CREATE_COIN. If there are no memos, the addition coin_id is not included. If they are not formatted as a list\n of bytes, they are not included. 
This is expensive to call, it should not be used in full node code.\n ", + "n_words": 59, + "vocab_size": 40, + "n_whitespaces": 88, + "language": "en" + } + }, + { + "id": 261652, + "commit_id": "758fe0d9c72ba343097003e7992c9239e58bfc63", + "repo": "scikit-learn", + "path": "sklearn/model_selection/tests/test_plot.py", + "file_name": "test_plot.py", + "fun_name": "test_learning_curve_display_default_usage", + "commit_message": "FEA add LearningCurveDisplay to show plot learning curve (#24084)\n\nCo-authored-by: jeremie du boisberranger \r\nCo-authored-by: Arturo Amor <86408019+ArturoAmorQ@users.noreply.github.com>", + "code": "def test_learning_curve_display_default_usage(pyplot, data):\n \n X, y = data\n estimator = DecisionTreeClassifier(random_state=0)\n\n train_sizes = [0.3, 0.6, 0.9]\n display = LearningCurveDisplay.from_estimator(\n estimator, X, y, train_sizes=train_sizes\n )\n\n import matplotlib as mpl\n\n assert display.errorbar_ is None\n\n assert isinstance(display.lines_, list)\n for line in display.lines_:\n assert isinstance(line, mpl.lines.Line2D)\n\n assert isinstance(display.fill_between_, list)\n for fill in display.fill_between_:\n assert isinstance(fill, mpl.collections.PolyCollection)\n assert fill.get_alpha() == 0.5\n\n assert display.score_name == \"Score\"\n assert display.ax_.get_xlabel() == \"Number of samples in the training set\"\n assert display.ax_.get_ylabel() == \"Score\"\n\n _, legend_labels = display.ax_.get_legend_handles_labels()\n assert legend_labels == [\"Testing metric\"]\n\n train_sizes_abs, train_scores, test_scores = learning_curve(\n estimator, X, y, train_sizes=train_sizes\n )\n\n assert_array_equal(display.train_sizes, train_sizes_abs)\n assert_allclose(display.train_scores, train_scores)\n assert_allclose(display.test_scores, test_scores)\n\n", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 199, + "n_words": 98, + "vocab_size": 68, + "complexity": 3, + "nloc": 27, + "token_counts": 211, + "n_ast_nodes": 313, + "n_identifiers": 39, + "d_id": 76917, + "documentation": { + "docstring": "Check the default usage of the LearningCurveDisplay class.", + "n_words": 8, + "vocab_size": 7, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 303231, + "commit_id": "5ee2f4f438f8acb119308738639169138b15662c", + "repo": "core", + "path": "tests/components/sensibo/test_climate.py", + "file_name": "test_climate.py", + "fun_name": "test_climate_find_valid_targets", + "commit_message": "Sensibo Set temperature improvement (#72992)", + "code": "async def test_climate_find_valid_targets():\n \n\n valid_targets = [10, 16, 17, 18, 19, 20]\n\n assert _find_valid_target_temp(7, valid_targets) == 10\n assert _find_valid_target_temp(10, valid_targets) == 10\n assert _find_valid_target_temp(11, valid_targets) == 16\n assert _find_valid_target_temp(15, valid_targets) == 16\n assert _find_valid_target_temp(16, valid_targets) == 16\n assert _find_valid_target_temp(18.5, valid_targets) == 19\n assert _find_valid_target_temp(20, valid_targets) == 20\n assert _find_valid_target_temp(25, valid_targets) == 20\n\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 81, + "n_words": 51, + "vocab_size": 26, + "complexity": 1, + "nloc": 10, + "token_counts": 94, + "n_ast_nodes": 135, + "n_identifiers": 3, + "d_id": 102059, + "documentation": { + 
"docstring": "Test function to return temperature from valid targets.", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 249919, + "commit_id": "854a6884d81c95297bf93badcddc00a4cab93418", + "repo": "synapse", + "path": "tests/replication/test_pusher_shard.py", + "file_name": "test_pusher_shard.py", + "fun_name": "test_send_push_single_worker", + "commit_message": "Modernize unit tests configuration settings for workers. (#14568)\n\nUse the newer foo_instances configuration instead of the\r\ndeprecated flags to enable specific features (e.g. start_pushers).", + "code": "def test_send_push_single_worker(self):\n \n http_client_mock = Mock(spec_set=[\"post_json_get_json\"])\n http_client_mock.post_json_get_json.side_effect = (\n lambda *_, **__: defer.succeed({})\n )\n\n self.make_worker_hs(\n \"synapse.app.generic_worker\",\n {\"worker_name\": \"pusher1\", \"pusher_instances\": [\"pusher1\"]},\n proxied_blacklisted_http_client=http_client_mock,\n )\n\n event_id = self._create_pusher_and_send_msg(\"user\")\n\n # Advance time a bit, so the pusher will register something has happened\n self.pump()\n\n http_client_mock.post_json_get_json.assert_called_once()\n self.assertEqual(\n http_client_mock.post_json_get_json.call_args[0][0],\n \"https://push.example.com/_matrix/push/v1/notify\",\n )\n self.assertEqual(\n event_id,\n http_client_mock.post_json_get_json.call_args[0][1][\"notification\"][\n \"event_id\"\n ],\n )\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 261, + "n_words": 49, + "vocab_size": 43, + "complexity": 1, + "nloc": 23, + "token_counts": 125, + "n_ast_nodes": 213, + "n_identifiers": 19, + "d_id": 73196, + "documentation": { + "docstring": "Test that registration works when using a pusher worker.", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 247794, + "commit_id": "9d21ecf7ceab55bc19c4457b8b07401b0b1623a7", + "repo": "synapse", + "path": "tests/storage/test_id_generators.py", + "file_name": "test_id_generators.py", + "fun_name": "test_load_existing_stream", + "commit_message": "Add type hints to tests files. (#12256)", + "code": "def test_load_existing_stream(self) -> None:\n \n self._insert_rows(\"foobar1\", \"first\", 3)\n self._insert_rows(\"foobar2\", \"second\", 3)\n self._insert_rows(\"foobar2\", \"second\", 1, update_stream_table=False)\n\n first_id_gen = self._create_id_generator(\"first\", writers=[\"first\", \"second\"])\n second_id_gen = self._create_id_generator(\"second\", writers=[\"first\", \"second\"])\n\n # The first ID gen will notice that it can advance its token to 7 as it\n # has no in progress writes...\n self.assertEqual(first_id_gen.get_positions(), {\"first\": 7, \"second\": 6})\n self.assertEqual(first_id_gen.get_current_token_for_writer(\"first\"), 7)\n self.assertEqual(first_id_gen.get_current_token_for_writer(\"second\"), 6)\n self.assertEqual(first_id_gen.get_persisted_upto_position(), 7)\n\n # ... 
but the second ID gen doesn't know that.\n self.assertEqual(second_id_gen.get_positions(), {\"first\": 3, \"second\": 7})\n self.assertEqual(second_id_gen.get_current_token_for_writer(\"first\"), 3)\n self.assertEqual(second_id_gen.get_current_token_for_writer(\"second\"), 7)\n self.assertEqual(first_id_gen.get_persisted_upto_position(), 7)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 198, + "n_words": 79, + "vocab_size": 61, + "complexity": 1, + "nloc": 17, + "token_counts": 190, + "n_ast_nodes": 330, + "n_identifiers": 12, + "d_id": 71927, + "documentation": { + "docstring": "Test creating ID gens with multiple tables that have rows from after\n the position in `stream_positions` table.\n ", + "n_words": 17, + "vocab_size": 17, + "n_whitespaces": 31, + "language": "en" + } + }, + { + "id": 71800, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/admin/tests/test_account_management.py", + "file_name": "test_account_management.py", + "fun_name": "test_notification_preferences_panel_reduced_for_non_moderators", + "commit_message": "Reformat with black", + "code": "def test_notification_preferences_panel_reduced_for_non_moderators(self):\n \n response = self.client.get(reverse(\"wagtailadmin_account\"))\n\n # Find notifications panel through context\n notifications_panel = None\n for panelset in response.context[\"panels_by_tab\"].values():\n for panel in panelset:\n if panel.name == \"notifications\":\n notifications_panel = panel\n break\n\n notifications_form = notifications_panel.get_form()\n self.assertIn(\"approved_notifications\", notifications_form.fields.keys())\n self.assertIn(\"rejected_notifications\", notifications_form.fields.keys())\n self.assertNotIn(\"submitted_notifications\", notifications_form.fields.keys())\n self.assertIn(\n \"updated_comments_notifications\", notifications_form.fields.keys()\n )\n\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 195, + "n_words": 43, + "vocab_size": 33, + "complexity": 4, + "nloc": 15, + "token_counts": 115, + "n_ast_nodes": 200, + "n_identifiers": 18, + "d_id": 15750, + "documentation": { + "docstring": "\n This tests that a user without publish permissions is not shown the\n notification preference for 'submitted' items\n ", + "n_words": 17, + "vocab_size": 17, + "n_whitespaces": 39, + "language": "en" + } + }, + { + "id": 159507, + "commit_id": "9fc462da870f69f9976be3bc081675844b9f64c2", + "repo": "rasa", + "path": "rasa/engine/graph.py", + "file_name": "graph.py", + "fun_name": "as_dict", + "commit_message": "fix type annotation in rasa.engine", + "code": "def as_dict(self) -> Dict[Text, Any]:\n \n serializable_graph_schema: Dict[Text, Dict[Text, Any]] = {\"nodes\": {}}\n for node_name, node in self.nodes.items():\n serializable = dataclasses.asdict(node)\n\n # Classes are not JSON serializable (surprise)\n serializable[\"uses\"] = f\"{node.uses.__module__}.{node.uses.__name__}\"\n\n serializable_graph_schema[\"nodes\"][node_name] = serializable\n\n return serializable_graph_schema\n", + "url": "https://github.com/RasaHQ/rasa.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 107, + "n_words": 35, + "vocab_size": 28, + "complexity": 2, + "nloc": 12, + "token_counts": 72, + "n_ast_nodes": 137, + 
"n_identifiers": 16, + "d_id": 38299, + "documentation": { + "docstring": "Returns graph schema in a serializable format.\n\n Returns:\n The graph schema in a format which can be dumped as JSON or other formats.\n ", + "n_words": 23, + "vocab_size": 19, + "n_whitespaces": 48, + "language": "en" + } + }, + { + "id": 81965, + "commit_id": "68a44529b6b77d2d43d7099b654560bfd8bbf518", + "repo": "awx", + "path": "awxkit/awxkit/api/pages/page.py", + "file_name": "page.py", + "fun_name": "page_identity", + "commit_message": "Register pages for the Instance peers and install bundle endpoints\n\nThis includes exposing a new interface for Page objects, Page.bytes,\nto return the full bytestring contents of the response.", + "code": "def page_identity(self, response, request_json=None):\n \n request_path = response.request.path_url\n if request_path == '/migrations_notran/':\n raise exc.IsMigrating('You have been redirected to the migration-in-progress page.')\n request_method = response.request.method.lower()\n\n self.last_elapsed = response.elapsed\n\n if isinstance(request_json, dict) and 'ds' in request_json:\n ds = request_json.ds\n else:\n ds = None\n\n data = self.extract_data(response)\n exc_str = \"%s (%s) received\" % (http.responses[response.status_code], response.status_code)\n\n exception = exception_from_status_code(response.status_code)\n if exception:\n raise exception(exc_str, data)\n\n if response.status_code in (http.OK, http.CREATED, http.ACCEPTED):\n\n # Not all JSON responses include a URL. Grab it from the request\n # object, if needed.\n if 'url' in data:\n endpoint = data['url']\n else:\n endpoint = request_path\n\n data = objectify_response_json(response)\n\n if request_method in ('get', 'patch', 'put'):\n # Update existing resource and return it\n if are_same_endpoint(self.endpoint, request_path):\n self.json = data\n self.r = response\n return self\n\n registered_type = get_registered_page(request_path, request_method)\n return registered_type(self.connection, endpoint=endpoint, json=data, last_elapsed=response.elapsed, r=response, ds=ds)\n\n elif response.status_code == http.FORBIDDEN:\n if is_license_invalid(response):\n raise exc.LicenseInvalid(exc_str, data)\n elif is_license_exceeded(response):\n raise exc.LicenseExceeded(exc_str, data)\n else:\n raise exc.Forbidden(exc_str, data)\n\n elif response.status_code == http.BAD_REQUEST:\n if is_license_invalid(response):\n raise exc.LicenseInvalid(exc_str, data)\n if is_duplicate_error(response):\n raise exc.Duplicate(exc_str, data)\n else:\n raise exc.BadRequest(exc_str, data)\n else:\n raise exc.Unknown(exc_str, data)\n", + "url": "https://github.com/ansible/awx.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 694, + "n_words": 171, + "vocab_size": 104, + "complexity": 15, + "nloc": 44, + "token_counts": 337, + "n_ast_nodes": 536, + "n_identifiers": 47, + "d_id": 17285, + "documentation": { + "docstring": "Takes a `requests.Response` and\n returns a new __item_class__ instance if the request method is not a get, or returns\n a __class__ instance if the request path is different than the caller's `endpoint`.\n ", + "n_words": 32, + "vocab_size": 22, + "n_whitespaces": 56, + "language": "en" + } + }, + { + "id": 286399, + "commit_id": "09f753da1c2a2f03c41fe6a3ca2eb79f6ea58995", + "repo": "OpenBBTerminal", + "path": "openbb_terminal/cryptocurrency/overview/overview_controller.py", + "file_name": "overview_controller.py", + "fun_name": "call_categories", + 
"commit_message": "More Fixes to Crypto + key sort (#3244)\n\n* fix #3095 - autocomplete and command working + key sort\r\n\r\n* fix #3056\r\n\r\n* fix [Bug] bugs #3048\r\n\r\n* fix [Bug] bug #3017\r\n\r\n* sort -> sortby, not ascend, tests\r\n\r\n* fix my goof ups\r\n\r\nCo-authored-by: james ", + "code": "def call_categories(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"categories\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n\n parser.add_argument(\n \"-l\",\n \"--limit\",\n dest=\"limit\",\n type=check_positive,\n help=\"display N number of records\",\n default=15,\n )\n\n parser.add_argument(\n \"-s\",\n \"--sortby\",\n dest=\"sortby\",\n type=str,\n help=\"Sort by given column. Default: market_cap_desc\",\n default=pycoingecko_model.SORT_VALUES[0],\n choices=pycoingecko_model.SORT_VALUES,\n )\n\n parser.add_argument(\n \"--pie\",\n action=\"store_true\",\n help=\"Flag to show pie chart\",\n dest=\"pie\",\n default=False,\n )\n\n ns_parser = self.parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n if ns_parser:\n pycoingecko_view.display_categories(\n limit=ns_parser.limit,\n export=ns_parser.export,\n sortby=ns_parser.sortby,\n pie=ns_parser.pie,\n )\n\n # TODO: solve sort (similar to losers from discovery)", + "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 494, + "n_words": 72, + "vocab_size": 63, + "complexity": 2, + "nloc": 43, + "token_counts": 161, + "n_ast_nodes": 254, + "n_identifiers": 31, + "d_id": 85786, + "documentation": { + "docstring": "Process top_categories commandShows top cryptocurrency categories by market capitalization. It includes categories like:\n stablecoins, defi, solana ecosystem, polkadot ecosystem and many others.\n You can sort by {}, using --sortby parameter", + "n_words": 30, + "vocab_size": 28, + "n_whitespaces": 51, + "language": "en" + } + }, + { + "id": 273362, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/layers/preprocessing/preprocessing_utils.py", + "file_name": "preprocessing_utils.py", + "fun_name": "listify_tensors", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def listify_tensors(x):\n \n if tf.is_tensor(x):\n x = x.numpy()\n if isinstance(x, np.ndarray):\n x = x.tolist()\n return x\n\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 41, + "n_words": 15, + "vocab_size": 11, + "complexity": 3, + "nloc": 6, + "token_counts": 40, + "n_ast_nodes": 68, + "n_identifiers": 9, + "d_id": 81118, + "documentation": { + "docstring": "Convert any tensors or numpy arrays to lists for config serialization.", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 249472, + "commit_id": "d3d9ca156e323fe194b1bcb1af1628f65a2f3c1c", + "repo": "synapse", + "path": "tests/rest/client/test_keys.py", + "file_name": "test_keys.py", + "fun_name": "test_key_query_cancellation", + "commit_message": "Cancel the processing of key query requests when they time out. 
(#13680)", + "code": "def test_key_query_cancellation(self) -> None:\n \n self.register_user(\"alice\", \"wonderland\")\n alice_token = self.login(\"alice\", \"wonderland\")\n\n bob = self.register_user(\"bob\", \"uncle\")\n\n channel = make_request_with_cancellation_test(\n \"test_key_query_cancellation\",\n self.reactor,\n self.site,\n \"POST\",\n \"/_matrix/client/r0/keys/query\",\n {\n \"device_keys\": {\n # Empty list means we request keys for all bob's devices\n bob: [],\n },\n },\n token=alice_token,\n )\n\n self.assertEqual(200, channel.code, msg=channel.result[\"body\"])\n self.assertIn(bob, channel.json_body[\"device_keys\"])\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 259, + "n_words": 47, + "vocab_size": 42, + "complexity": 1, + "nloc": 23, + "token_counts": 104, + "n_ast_nodes": 177, + "n_identifiers": 17, + "d_id": 72939, + "documentation": { + "docstring": "\n Tests that /keys/query is cancellable and does not swallow the\n CancelledError.\n ", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 33, + "language": "en" + } + }, + { + "id": 195664, + "commit_id": "5fc97f8ef40cbc9363c7f7e0ff25f12c45a2203e", + "repo": "sympy", + "path": "sympy/simplify/trigsimp.py", + "file_name": "trigsimp.py", + "fun_name": "trigsimp", + "commit_message": "implemented inverse option for trigsimp", + "code": "def trigsimp(expr, inverse=False, **opts):\n \n from sympy.simplify.fu import fu\n\n expr = sympify(expr)\n\n _eval_trigsimp = getattr(expr, '_eval_trigsimp', None)\n if _eval_trigsimp is not None:\n return _eval_trigsimp(**opts)\n\n old = opts.pop('old', False)\n if not old:\n opts.pop('deep', None)\n opts.pop('recursive', None)\n method = opts.pop('method', 'matching')\n else:\n method = 'old'\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 101, + "n_words": 42, + "vocab_size": 32, + "complexity": 4, + "nloc": 26, + "token_counts": 202, + "n_ast_nodes": 160, + "n_identifiers": 13, + "d_id": 47355, + "documentation": { + "docstring": "Returns a reduced expression by using known trig identities.\n\n Parameters\n ==========\n\n inverse : bool, optional\n If ``inverse=True``, it will be assumed that a composition of inverse\n functions, such as sin and asin, can be cancelled in any order.\n For example, ``asin(sin(x))`` will yield ``x`` without checking whether\n x belongs to the set where this relation is true. The default is False.\n Default : True\n\n method : string, optional\n Specifies the method to use. Valid choices are:\n\n - ``'matching'``, default\n - ``'groebner'``\n - ``'combined'``\n - ``'fu'``\n - ``'old'``\n\n If ``'matching'``, simplify the expression recursively by targeting\n common patterns. If ``'groebner'``, apply an experimental groebner\n basis algorithm. In this case further options are forwarded to\n ``trigsimp_groebner``, please refer to\n its docstring. If ``'combined'``, it first runs the groebner basis\n algorithm with small default parameters, then runs the ``'matching'``\n algorithm. If ``'fu'``, run the collection of trigonometric\n transformations described by Fu, et al. (see the\n :py:func:`~sympy.simplify.fu.fu` docstring). If ``'old'``, the original\n SymPy trig simplification function is run.\n opts :\n Optional keyword arguments passed to the method. 
See each method's\n function docstring for details.\n\n Examples\n ========\n\n >>> from sympy import trigsimp, sin, cos, log\n >>> from sympy.abc import x\n >>> e = 2*sin(x)**2 + 2*cos(x)**2\n >>> trigsimp(e)\n 2\n\n Simplification occurs wherever trigonometric functions are located.\n\n >>> trigsimp(log(e))\n log(2)\n\n Using ``method='groebner'`` (or ``method='combined'``) might lead to\n greater simplification.\n\n The old trigsimp routine can be accessed as with method ``method='old'``.\n\n >>> from sympy import coth, tanh\n >>> t = 3*tanh(x)**7 - 2/coth(x)**7\n >>> trigsimp(t, method='old') == t\n True\n >>> trigsimp(t)\n tanh(x)**7\n\n ", + "n_words": 255, + "vocab_size": 181, + "n_whitespaces": 491, + "language": "en" + } + }, + { + "id": 47330, + "commit_id": "d8889da29ccfcbecd2c89b9e8e278c480767d678", + "repo": "airflow", + "path": "airflow/utils/sqlalchemy.py", + "file_name": "sqlalchemy.py", + "fun_name": "db_supports_json", + "commit_message": "Move the database configuration to a new section (#22284)\n\nCo-authored-by: gitstart-airflow \r\nCo-authored-by: GitStart <1501599+gitstart@users.noreply.github.com>\r\nCo-authored-by: Egbosi Kelechi ", + "code": "def db_supports_json(self):\n \n return not conf.get(\"database\", \"sql_alchemy_conn\").startswith(\"mssql\")\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 20, + "n_words": 6, + "vocab_size": 6, + "complexity": 1, + "nloc": 2, + "token_counts": 21, + "n_ast_nodes": 42, + "n_identifiers": 5, + "d_id": 9072, + "documentation": { + "docstring": "Checks if the database supports JSON (i.e. is NOT MSSQL)", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 60232, + "commit_id": "cc4d0564756ca067516f71718a3d135996525909", + "repo": "transferlearning", + "path": "code/deep/BJMMD/caffe/python/caffe/coord_map.py", + "file_name": "coord_map.py", + "fun_name": "coord_map_from_to", + "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", + "code": "def coord_map_from_to(top_from, top_to):\n \n # We need to find a common ancestor of top_from and top_to.\n # We'll assume that all ancestors are equivalent here (otherwise the graph\n # is an inconsistent state (which we could improve this to check for)).\n # For now use a brute-force algorithm.\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 6, + "n_whitespaces": 62, + "n_words": 47, + "vocab_size": 42, + "complexity": 8, + "nloc": 28, + "token_counts": 177, + "n_ast_nodes": 19, + "n_identifiers": 3, + "d_id": 12025, + "documentation": { + "docstring": "\n Determine the coordinate mapping betweeen a top (from) and a top (to).\n Walk the graph to find a common ancestor while composing the coord maps for\n from and to until they meet. 
As a last step the from map is inverted.\n ", + "n_words": 41, + "vocab_size": 31, + "n_whitespaces": 54, + "language": "en" + } + }, + { + "id": 151197, + "commit_id": "86aa875bc9d5edeba04f908fe45b011e52045c83", + "repo": "freqtrade", + "path": "freqtrade/freqai/utils.py", + "file_name": "utils.py", + "fun_name": "plot_feature_importance", + "commit_message": "plot features as html instead of png", + "code": "def plot_feature_importance(model, feature_names, pair, train_dir, count_max=50) -> None:\n \n try:\n import plotly.graph_objects as go\n from plotly.subplots import make_subplots\n except ImportError:\n logger.exception(\"Module plotly not found \\n Please install using `pip3 install plotly`\")\n exit(1)\n\n from freqtrade.plot.plotting import store_plot_file\n\n # Gather feature importance from model\n if \"catboost.core\" in str(model.__class__):\n feature_importance = model.get_feature_importance()\n elif \"lightgbm.sklearn\" in str(model.__class__):\n feature_importance = model.feature_importances_\n else:\n raise NotImplementedError(f\"Cannot extract feature importance for {model.__class__}\")\n\n # Data preparation\n fi_df = pd.DataFrame({\n \"feature_names\": np.array(feature_names),\n \"feature_importance\": np.array(feature_importance)\n })\n fi_df_top = fi_df.nlargest(count_max, \"feature_importance\")[::-1]\n fi_df_worst = fi_df.nsmallest(count_max, \"feature_importance\")[::-1]\n\n # Plotting", + "url": "https://github.com/freqtrade/freqtrade.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 189, + "n_words": 84, + "vocab_size": 67, + "complexity": 4, + "nloc": 37, + "token_counts": 229, + "n_ast_nodes": 261, + "n_identifiers": 34, + "d_id": 34972, + "documentation": { + "docstring": "\n Plot Best and Worst Features by importance for CatBoost model.\n Called once per sub-train.\n Usage: plot_feature_importance(\n model=model,\n feature_names=dk.training_features_list,\n pair=pair,\n train_dir=dk.data_path)\n ", + "n_words": 20, + "vocab_size": 20, + "n_whitespaces": 89, + "language": "en" + } + }, + { + "id": 138395, + "commit_id": "30ab5458a7e4ba2351d5e1beef8c8797b5946493", + "repo": "ray", + "path": "dashboard/state_aggregator.py", + "file_name": "state_aggregator.py", + "fun_name": "get_objects", + "commit_message": "[State Observability] Tasks and Objects API (#23912)\n\nThis PR implements ray list tasks and ray list objects APIs.\r\n\r\nNOTE: You can ignore the merge conflict for now. It is because the first PR was reverted. There's a fix PR open now.", + "code": "async def get_objects(self) -> dict:\n \n replies = await asyncio.gather(\n *[\n self._client.get_object_info(node_id, timeout=DEFAULT_RPC_TIMEOUT)\n for node_id in self._client.get_all_registered_raylet_ids()\n ]\n )\n\n worker_stats = []\n for reply in replies:\n for core_worker_stat in reply.core_workers_stats:\n # NOTE: Set preserving_proto_field_name=False here because\n # `construct_memory_table` requires a dictionary that has\n # modified protobuf name\n # (e.g., workerId instead of worker_id) as a key.\n worker_stats.append(\n self._message_to_dict(\n message=core_worker_stat,\n fields_to_decode=[\"object_id\"],\n preserving_proto_field_name=False,\n )\n )\n result = {}\n memory_table = memory_utils.construct_memory_table(worker_stats)\n for entry in memory_table.table:\n data = entry.as_dict()\n # `construct_memory_table` returns object_ref field which is indeed\n # object_id. 
We do transformation here.\n # TODO(sang): Refactor `construct_memory_table`.\n data[\"object_id\"] = data[\"object_ref\"]\n del data[\"object_ref\"]\n data = filter_fields(data, ObjectState)\n result[data[\"object_id\"]] = data\n return result\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 518, + "n_words": 107, + "vocab_size": 80, + "complexity": 5, + "nloc": 32, + "token_counts": 140, + "n_ast_nodes": 234, + "n_identifiers": 31, + "d_id": 31404, + "documentation": { + "docstring": "List all object information from the cluster.\n\n Returns:\n {object_id -> object_data_in_dict}\n object_data_in_dict's schema is in ObjectState\n ", + "n_words": 16, + "vocab_size": 16, + "n_whitespaces": 52, + "language": "en" + } + }, + { + "id": 63729, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_vendor/six.py", + "file_name": "six.py", + "fun_name": "exec_", + "commit_message": "upd; format", + "code": "def exec_(_code_, _globs_=None, _locs_=None):\n \n if _globs_ is None:\n frame = sys._getframe(1)\n _globs_ = frame.f_globals\n if _locs_ is None:\n _locs_ = frame.f_locals\n del frame\n elif _locs_ is None:\n _locs_ = _globs_\n exec()\n\n exec_()\n\n\nif sys.version_info[:2] > (3,):\n exec_()\nelse:", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 140, + "n_words": 38, + "vocab_size": 22, + "complexity": 4, + "nloc": 10, + "token_counts": 56, + "n_ast_nodes": 136, + "n_identifiers": 11, + "d_id": 13487, + "documentation": { + "docstring": "Execute code in a namespace.exec _code_ in _globs_, _locs_def reraise(tp, value, tb=None):\n try:\n raise tp, value, tb\n finally:\n tb = None\ndef raise_from(value, from_value):\n try:\n raise value from from_value\n finally:\n value = None\n", + "n_words": 33, + "vocab_size": 24, + "n_whitespaces": 71, + "language": "en" + } + }, + { + "id": 264128, + "commit_id": "fa1e28e860c4bdb3e585a968bd248a2ac666e1f6", + "repo": "netbox", + "path": "netbox/extras/tests/test_customfields.py", + "file_name": "test_customfields.py", + "fun_name": "test_create_single_object_with_values", + "commit_message": "Initial work on #7006", + "code": "def test_create_single_object_with_values(self):\n \n data = {\n 'name': 'Site 3',\n 'slug': 'site-3',\n 'custom_fields': {\n 'text_field': 'bar',\n 'longtext_field': 'blah blah blah',\n 'number_field': 456,\n 'boolean_field': True,\n 'date_field': '2020-01-02',\n 'url_field': 'http://example.com/2',\n 'json_field': '{\"foo\": 1, \"bar\": 2}',\n 'choice_field': 'Bar',\n 'object_field': VLAN.objects.get(vid=2).pk,\n },\n }\n url = reverse('dcim-api:site-list')\n self.add_permissions('dcim.add_site')\n\n response = self.client.post(url, data, format='json', **self.header)\n self.assertHttpStatus(response, status.HTTP_201_CREATED)\n\n # Validate response data\n response_cf = response.data['custom_fields']\n data_cf = data['custom_fields']\n self.assertEqual(response_cf['text_field'], data_cf['text_field'])\n self.assertEqual(response_cf['longtext_field'], data_cf['longtext_field'])\n self.assertEqual(response_cf['number_field'], data_cf['number_field'])\n self.assertEqual(response_cf['boolean_field'], data_cf['boolean_field'])\n self.assertEqual(response_cf['date_field'], data_cf['date_field'])\n 
self.assertEqual(response_cf['url_field'], data_cf['url_field'])\n self.assertEqual(response_cf['json_field'], data_cf['json_field'])\n self.assertEqual(response_cf['choice_field'], data_cf['choice_field'])\n self.assertEqual(response_cf['object_field']['id'], data_cf['object_field'])\n\n # Validate database data\n site = Site.objects.get(pk=response.data['id'])\n self.assertEqual(site.custom_field_data['text_field'], data_cf['text_field'])\n self.assertEqual(site.custom_field_data['longtext_field'], data_cf['longtext_field'])\n self.assertEqual(site.custom_field_data['number_field'], data_cf['number_field'])\n self.assertEqual(site.custom_field_data['boolean_field'], data_cf['boolean_field'])\n self.assertEqual(str(site.custom_field_data['date_field']), data_cf['date_field'])\n self.assertEqual(site.custom_field_data['url_field'], data_cf['url_field'])\n self.assertEqual(site.custom_field_data['json_field'], data_cf['json_field'])\n self.assertEqual(site.custom_field_data['choice_field'], data_cf['choice_field'])\n self.assertEqual(site.custom_field_data['object_field'], data_cf['object_field'])\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 491, + "n_words": 102, + "vocab_size": 82, + "complexity": 1, + "nloc": 41, + "token_counts": 420, + "n_ast_nodes": 735, + "n_identifiers": 26, + "d_id": 77615, + "documentation": { + "docstring": "\n Create a single new site with a value for each type of custom field.\n ", + "n_words": 14, + "vocab_size": 13, + "n_whitespaces": 29, + "language": "en" + } + }, + { + "id": 155537, + "commit_id": "336aac39ee8a616ac2645e532392123ae1bfddd1", + "repo": "dask", + "path": "dask/dataframe/groupby.py", + "file_name": "groupby.py", + "fun_name": "shift", + "commit_message": "Add groupby shift method (#8522)\n\nImplements the shift `method` following the `transform` and `apply` methods.", + "code": "def shift(self, periods=1, freq=None, axis=0, fill_value=None, meta=no_default):\n \n if meta is no_default:\n with raise_on_meta_error(\"groupby.shift()\", udf=False):\n meta_kwargs = _extract_meta(\n {\n \"periods\": periods,\n \"freq\": freq,\n \"axis\": axis,\n \"fill_value\": fill_value,\n },\n nonempty=True,\n )\n meta = self._meta_nonempty.shift(**meta_kwargs)\n\n msg = (\n \"`meta` is not specified, inferred from partial data. 
\"\n \"Please provide `meta` if the result is unexpected.\\n\"\n \" Before: .shift(1)\\n\"\n \" After: .shift(1, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\\n\"\n \" or: .shift(1, meta=('x', 'f8')) for series result\"\n )\n warnings.warn(msg, stacklevel=2)\n\n meta = make_meta(meta, parent_meta=self._meta.obj)\n\n # Validate self.by\n if isinstance(self.by, list) and any(\n isinstance(item, Series) for item in self.by\n ):\n raise NotImplementedError(\n \"groupby-shift with a multiple Series is currently not supported\"\n )\n df = self.obj\n should_shuffle = not (df.known_divisions and df._contains_index_name(self.by))\n\n if should_shuffle:\n df2, by = self._shuffle(meta)\n else:\n df2 = df\n by = self.by\n\n # Perform embarrassingly parallel groupby-shift\n result = map_partitions(\n _groupby_slice_shift,\n df2,\n by,\n self._slice,\n periods=periods,\n freq=freq,\n axis=axis,\n fill_value=fill_value,\n token=\"groupby-shift\",\n group_keys=self.group_keys,\n meta=meta,\n **self.observed,\n **self.dropna,\n )\n return result\n", + "url": "https://github.com/dask/dask.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 807, + "n_words": 153, + "vocab_size": 115, + "complexity": 7, + "nloc": 51, + "token_counts": 246, + "n_ast_nodes": 391, + "n_identifiers": 43, + "d_id": 36420, + "documentation": { + "docstring": "Parallel version of pandas GroupBy.shift\n\n This mimics the pandas version except for the following:\n\n If the grouper does not align with the index then this causes a full\n shuffle. The order of rows within each group may not be preserved.\n\n Parameters\n ----------\n periods : Delayed, Scalar or int, default 1\n Number of periods to shift.\n freq : Delayed, Scalar or str, optional\n Frequency string.\n axis : axis to shift, default 0\n Shift direction.\n fill_value : Scalar, Delayed or object, optional\n The scalar value to use for newly introduced missing values.\n $META\n\n Returns\n -------\n shifted : Series or DataFrame shifted within each group.\n\n Examples\n --------\n >>> import dask\n >>> ddf = dask.datasets.timeseries(freq=\"1H\")\n >>> result = ddf.groupby(\"name\").shift(1, meta={\"id\": int, \"x\": float, \"y\": float})\n ", + "n_words": 121, + "vocab_size": 89, + "n_whitespaces": 299, + "language": "en" + } + }, + { + "id": 70108, + "commit_id": "917f01a8306055b21437deac35333dddd1210e39", + "repo": "glances", + "path": "glances/processes.py", + "file_name": "processes.py", + "fun_name": "nice_decrease", + "commit_message": "Update formater in the Makefile with flake8 and autopep8/autoflake", + "code": "def nice_decrease(self, pid):\n \n p = psutil.Process(pid)\n try:\n p.nice(p.nice() - 1)\n logger.info('Set nice level of process {} to {} (higher the priority)'.format(pid, p.nice()))\n except psutil.AccessDenied:\n logger.warning(\n 'Can not decrease (higher the priority) the nice level of process {} (access denied)'.format(pid)\n )\n", + "url": "https://github.com/nicolargo/glances.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 127, + "n_words": 40, + "vocab_size": 31, + "complexity": 2, + "nloc": 9, + "token_counts": 63, + "n_ast_nodes": 109, + "n_identifiers": 12, + "d_id": 15339, + "documentation": { + "docstring": "Decrease nice level\n On UNIX this is a number which usually goes from -20 to 20.\n The higher the nice value, the lower the priority of the process.", + "n_words": 28, + "vocab_size": 24, + "n_whitespaces": 41, + 
"language": "en" + } + }, + { + "id": 100267, + "commit_id": "444762114c1b1ad2e72c871e825373bd74880aba", + "repo": "faceswap", + "path": "lib/gpu_stats.py", + "file_name": "gpu_stats.py", + "fun_name": "_get_device_count", + "commit_message": "Initial somewhat working version", + "code": "def _get_device_count(self):\n \n if self._is_plaidml:\n self._device_count = self._plaid.device_count\n elif IS_MACOS:\n self._device_count = metal.get_device_count()\n else:\n try:\n self._device_count = pynvml.nvmlDeviceGetCount()\n except pynvml.NVMLError:\n self._device_count = 0\n self._log(\"debug\", \"GPU Device count: {}\".format(self._device_count))\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 136, + "n_words": 27, + "vocab_size": 21, + "complexity": 4, + "nloc": 11, + "token_counts": 70, + "n_ast_nodes": 121, + "n_identifiers": 14, + "d_id": 19777, + "documentation": { + "docstring": " Detect the number of GPUs attached to the system and allocate to\n :attr:`_device_count`. ", + "n_words": 13, + "vocab_size": 11, + "n_whitespaces": 21, + "language": "en" + } + }, + { + "id": 150869, + "commit_id": "2b5f0678772bea0abaf4abe93efc55de43ea3e0e", + "repo": "freqtrade", + "path": "freqtrade/rpc/rpc.py", + "file_name": "rpc.py", + "fun_name": "_handle_analyzed_df_message", + "commit_message": "Refactoring, minor improvements, data provider improvements", + "code": "def _handle_analyzed_df_message(self, type, data):\n \n key, value = data[\"key\"], data[\"value\"]\n pair, timeframe, candle_type = key\n\n # Skip any pairs that we don't have in the pairlist?\n # leader_pairlist = self._freqtrade.pairlists._whitelist\n # if pair not in leader_pairlist:\n # return\n\n dataframe = json_to_dataframe(value)\n\n if self._config.get('external_signal', {}).get('remove_signals_analyzed_df', False):\n dataframe = remove_entry_exit_signals(dataframe)\n\n logger.debug(f\"Handling analyzed dataframe for {pair}\")\n logger.debug(dataframe.tail())\n\n # Add the dataframe to the dataprovider\n dataprovider = self._freqtrade.dataprovider\n dataprovider.add_external_df(pair, timeframe, dataframe, candle_type)\n", + "url": "https://github.com/freqtrade/freqtrade.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 180, + "n_words": 67, + "vocab_size": 49, + "complexity": 2, + "nloc": 10, + "token_counts": 98, + "n_ast_nodes": 169, + "n_identifiers": 20, + "d_id": 34859, + "documentation": { + "docstring": "\n Handles the analyzed dataframes from the Leaders\n\n :param type: The data_type of the data\n :param data: The data\n ", + "n_words": 18, + "vocab_size": 13, + "n_whitespaces": 47, + "language": "en" + } + }, + { + "id": 242037, + "commit_id": "3a3727c022a361a0bc8a519ebc60e7de8124a5d9", + "repo": "scipy", + "path": "scipy/stats/_levy_stable/__init__.py", + "file_name": "__init__.py", + "fun_name": "pdf", + "commit_message": "DOC: stats: add levy_stable pdf/cdf/rvs docstring", + "code": "def pdf(self, x, *args, **kwds):\n \n # override base class version to correct\n # location for S1 parameterization\n if self._parameterization() == \"S0\":\n return super().pdf(x, *args, **kwds)\n elif self._parameterization() == \"S1\":\n (alpha, beta), delta, gamma = self._parse_args(*args, **kwds)\n if np.all(np.reshape(alpha, (1, -1))[0, :] != 1):\n return super().pdf(x, *args, **kwds)\n else:\n # correct location for this parameterisation\n x = np.reshape(x, 
(1, -1))[0, :]\n x, alpha, beta = np.broadcast_arrays(x, alpha, beta)\n\n data_in = np.dstack((x, alpha, beta))[0]\n data_out = np.empty(shape=(len(data_in), 1))\n # group data in unique arrays of alpha, beta pairs\n uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)\n for pair in uniq_param_pairs:\n _alpha, _beta = pair\n _delta = (\n delta + 2 * _beta * gamma * np.log(gamma) / np.pi\n if _alpha == 1.0\n else delta\n )\n data_mask = np.all(data_in[:, 1:] == pair, axis=-1)\n _x = data_in[data_mask, 0]\n data_out[data_mask] = (\n super()\n .pdf(_x, _alpha, _beta, loc=_delta, scale=gamma)\n .reshape(len(_x), 1)\n )\n output = data_out.T[0]\n if output.shape == (1,):\n return output[0]\n return output\n", + "url": "https://github.com/scipy/scipy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 21, + "n_whitespaces": 703, + "n_words": 154, + "vocab_size": 101, + "complexity": 7, + "nloc": 31, + "token_counts": 330, + "n_ast_nodes": 498, + "n_identifiers": 37, + "d_id": 69756, + "documentation": { + "docstring": "Probability density function of the Levy-stable distribution\n\n Parameters\n ----------\n x : array_like\n quantiles\n alpha, beta : array_like\n The shape parameters of the distribution. See the `levy_stable`\n object docstring for more information.\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n pdf : ndarray\n Probability density function evaluated at x\n ", + "n_words": 56, + "vocab_size": 40, + "n_whitespaces": 192, + "language": "en" + } + }, + { + "id": 156466, + "commit_id": "b946406a30cd12cd6989df3440011a734441a200", + "repo": "dask", + "path": "dask/dataframe/io/orc/core.py", + "file_name": "core.py", + "fun_name": "project_columns", + "commit_message": "Add from_map function to Dask-DataFrame (#8911)", + "code": "def project_columns(self, columns):\n \n if columns == self.columns:\n return self\n func = copy.deepcopy(self)\n func._columns = columns\n return func\n", + "url": "https://github.com/dask/dask.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 63, + "n_words": 17, + "vocab_size": 13, + "complexity": 2, + "nloc": 6, + "token_counts": 32, + "n_ast_nodes": 53, + "n_identifiers": 7, + "d_id": 36647, + "documentation": { + "docstring": "Return a new ORCFunctionWrapper object with\n a sub-column projection.\n ", + "n_words": 9, + "vocab_size": 8, + "n_whitespaces": 23, + "language": "en" + } + }, + { + "id": 113257, + "commit_id": "97d067e614243f06ed1f8e2d389512977fff8828", + "repo": "nni", + "path": "nni/compression/pytorch/speedup/compress_modules.py", + "file_name": "compress_modules.py", + "fun_name": "replace_embedding", + "commit_message": "Speedup enhancement (#4925)", + "code": "def replace_embedding(embedding, masks):\n \n # currently we donnot support replace the embedding layer\n # because we donnot have the corressponding pruner\n return embedding\n\n", + "url": "https://github.com/microsoft/nni.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 6, + "n_whitespaces": 34, + "n_words": 22, + "vocab_size": 17, + "complexity": 1, + "nloc": 2, + "token_counts": 10, + "n_ast_nodes": 20, + "n_identifiers": 3, + "d_id": 24867, + "documentation": { + "docstring": "\n Replace the embedding layer according the infered masks.\n We replace the embedding layer according the weight masks,\n ", + "n_words": 17, + "vocab_size": 11, + 
"n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 76705, + "commit_id": "470d39e1fe86084f729997f7c4e13f551e7e8c73", + "repo": "wagtail", + "path": "wagtail/admin/panels.py", + "file_name": "panels.py", + "fun_name": "get_edit_handler", + "commit_message": "Split out bind_to(model) into a separate bind_to_model method", + "code": "def get_edit_handler(cls):\n \n if hasattr(cls, \"edit_handler\"):\n edit_handler = cls.edit_handler\n else:\n # construct a TabbedInterface made up of content_panels, promote_panels\n # and settings_panels, skipping any which are empty\n tabs = []\n\n if cls.content_panels:\n tabs.append(ObjectList(cls.content_panels, heading=gettext_lazy(\"Content\")))\n if cls.promote_panels:\n tabs.append(ObjectList(cls.promote_panels, heading=gettext_lazy(\"Promote\")))\n if cls.settings_panels:\n tabs.append(\n ObjectList(\n cls.settings_panels,\n heading=gettext_lazy(\"Settings\"),\n classname=\"settings\",\n )\n )\n\n edit_handler = TabbedInterface(tabs, base_form_class=cls.base_form_class)\n\n return edit_handler.bind_to_model(cls)\n\n\nPage.get_edit_handler = get_edit_handler\n\n\n@receiver(setting_changed)", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "@receiver(setting_changed)", + "n_ast_errors": 1, + "ast_levels": 18, + "n_whitespaces": 253, + "n_words": 56, + "vocab_size": 47, + "complexity": 5, + "nloc": 19, + "token_counts": 118, + "n_ast_nodes": 216, + "n_identifiers": 19, + "d_id": 16572, + "documentation": { + "docstring": "\n Get the panel to use in the Wagtail admin when editing this page type.\n ", + "n_words": 14, + "vocab_size": 13, + "n_whitespaces": 21, + "language": "en" + } + }, + { + "id": 177116, + "commit_id": "4a019f04d0e304ecd2f28b15d854e1282e03461d", + "repo": "networkx", + "path": "networkx/algorithms/traversal/breadth_first_search.py", + "file_name": "breadth_first_search.py", + "fun_name": "descendants_at_distance", + "commit_message": "Adds ```nx.bfs_layers``` method (#5879)\n\n* reformatted the files\r\n\r\n* reformatted the files\r\n\r\n* added final changes\r\n\r\n* changed descendants_at_distance\r\n\r\n* fixed comment in bfs_layers\r\n\r\n* fixed comment in bfs_layers", + "code": "def descendants_at_distance(G, source, distance):\n \n if source not in G:\n raise nx.NetworkXError(f\"The node {source} is not in the graph.\")\n\n bfs_generator = nx.bfs_layers(G, source)\n for i, layer in enumerate(bfs_generator):\n if i == distance:\n return set(layer)\n return set()\n", + "url": "https://github.com/networkx/networkx.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 75, + "n_words": 35, + "vocab_size": 30, + "complexity": 4, + "nloc": 8, + "token_counts": 58, + "n_ast_nodes": 96, + "n_identifiers": 12, + "d_id": 42274, + "documentation": { + "docstring": "Returns all nodes at a fixed `distance` from `source` in `G`.\n\n Parameters\n ----------\n G : NetworkX graph\n A graph\n source : node in `G`\n distance : the distance of the wanted nodes from `source`\n\n Returns\n -------\n set()\n The descendants of `source` in `G` at the given `distance` from `source`\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> nx.descendants_at_distance(G, 2, 2)\n {0, 4}\n >>> H = nx.DiGraph()\n >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)])\n >>> nx.descendants_at_distance(H, 0, 2)\n {3, 4, 5, 6}\n >>> nx.descendants_at_distance(H, 5, 0)\n {5}\n >>> nx.descendants_at_distance(H, 5, 1)\n set()\n ", + "n_words": 
96, + "vocab_size": 61, + "n_whitespaces": 176, + "language": "en" + } + }, + { + "id": 249080, + "commit_id": "c97042f7eef3748e17c90e48a4122389a89c4735", + "repo": "synapse", + "path": "tests/rest/admin/test_device.py", + "file_name": "test_device.py", + "fun_name": "test_unknown_device", + "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13469)", + "code": "def test_unknown_device(self) -> None:\n \n url = \"/_synapse/admin/v2/users/%s/devices/unknown_device\" % urllib.parse.quote(\n self.other_user\n )\n\n channel = self.make_request(\n \"GET\",\n url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n\n channel = self.make_request(\n \"PUT\",\n url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n\n channel = self.make_request(\n \"DELETE\",\n url,\n access_token=self.admin_user_tok,\n )\n\n # Delete unknown device returns status 200\n self.assertEqual(200, channel.code, msg=channel.json_body)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 258, + "n_words": 50, + "vocab_size": 31, + "complexity": 1, + "nloc": 26, + "token_counts": 138, + "n_ast_nodes": 215, + "n_identifiers": 18, + "d_id": 72587, + "documentation": { + "docstring": "\n Tests that a lookup for a device that does not exist returns either HTTPStatus.NOT_FOUND or 200.\n ", + "n_words": 16, + "vocab_size": 14, + "n_whitespaces": 31, + "language": "en" + } + }, + { + "id": 30157, + "commit_id": "fa2ad657482aca9dc628e6d7062b8badf2706bb6", + "repo": "spotify-downloader", + "path": "tests/utils/test_config.py", + "file_name": "test_config.py", + "fun_name": "test_get_cache_path", + "commit_message": "v4 init", + "code": "def test_get_cache_path(setup):\n \n\n assert get_cache_path() == Path(setup.directory, \".spotdl\", \".spotipy\")\n\n", + "url": "https://github.com/spotDL/spotify-downloader.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 14, + "n_words": 8, + "vocab_size": 8, + "complexity": 1, + "nloc": 2, + "token_counts": 21, + "n_ast_nodes": 39, + "n_identifiers": 5, + "d_id": 5356, + "documentation": { + "docstring": "\n Tests if the path to the cache file is correct.\n ", + "n_words": 10, + "vocab_size": 9, + "n_whitespaces": 17, + "language": "en" + } + }, + { + "id": 60149, + "commit_id": "9ab65f6480a31ba022d9846fdfbfca1d17da8164", + "repo": "prefect", + "path": "src/prefect/infrastructure/kubernetes.py", + "file_name": "kubernetes.py", + "fun_name": "_get_cluster_uid", + "commit_message": "Add `PREFECT_KUBERNETES_CLUSTER_UID` to allow bypass of `kube-system` namespace read (#7864)\n\nCo-authored-by: Peyton <44583861+peytonrunyan@users.noreply.github.com>", + "code": "def _get_cluster_uid(self) -> str:\n \n # Default to an environment variable\n env_cluster_uid = os.environ.get(\"PREFECT_KUBERNETES_CLUSTER_UID\")\n if env_cluster_uid:\n return env_cluster_uid\n\n # Read the UID from the cluster namespace\n with self.get_client() as client:\n namespace = client.read_namespace(\"kube-system\")\n cluster_uid = namespace.metadata.uid\n\n return cluster_uid\n", + "url": "https://github.com/PrefectHQ/prefect.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 115, + 
"n_words": 37, + "vocab_size": 29, + "complexity": 2, + "nloc": 21, + "token_counts": 49, + "n_ast_nodes": 91, + "n_identifiers": 14, + "d_id": 11992, + "documentation": { + "docstring": "\n Gets a unique id for the current cluster being used.\n\n There is no real unique identifier for a cluster. However, the `kube-system`\n namespace is immutable and has a persistence UID that we use instead.\n\n PREFECT_KUBERNETES_CLUSTER_UID can be set in cases where the `kube-system`\n namespace cannot be read e.g. when a cluster role cannot be created. If set,\n this variable will be used and we will not attempt to read the `kube-system`\n namespace.\n\n See https://github.com/kubernetes/kubernetes/issues/44954\n ", + "n_words": 74, + "vocab_size": 53, + "n_whitespaces": 138, + "language": "en" + } + }, + { + "id": 270593, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/dtensor/layout_map.py", + "file_name": "layout_map.py", + "fun_name": "get_default_mesh", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def get_default_mesh(self):\n \n return self._default_mesh\n\n\nLayoutMap.get.__doc__ = LayoutMap.__getitem__.__doc__\n\n\n@keras_export(\"keras.dtensor.experimental.layout_map_scope\", v1=[])\n@contextlib.contextmanager", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "@keras_export(\"keras.dtensor.experimental.layout_map_scope\", v1=[])\n@contextlib.contextmanager", + "n_ast_errors": 1, + "ast_levels": 8, + "n_whitespaces": 21, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 2, + "token_counts": 10, + "n_ast_nodes": 60, + "n_identifiers": 11, + "d_id": 80492, + "documentation": { + "docstring": "Return the default `Mesh` set at instance creation.\n\n The `Mesh` can be used to create default replicated `Layout` when there\n isn't a match of the input string query.\n ", + "n_words": 28, + "vocab_size": 25, + "n_whitespaces": 49, + "language": "en" + } + }, + { + "id": 306974, + "commit_id": "52b5e1779f1ed6e5005dc0bdff4137040d7216fb", + "repo": "core", + "path": "homeassistant/components/philips_js/media_player.py", + "file_name": "media_player.py", + "fun_name": "state", + "commit_message": "Use new media player enums [p] (#78058)", + "code": "def state(self) -> MediaPlayerState:\n \n if self._tv.on and (self._tv.powerstate == \"On\" or self._tv.powerstate is None):\n return MediaPlayerState.ON\n return MediaPlayerState.OFF\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 50, + "n_words": 18, + "vocab_size": 17, + "complexity": 4, + "nloc": 5, + "token_counts": 41, + "n_ast_nodes": 68, + "n_identifiers": 8, + "d_id": 105754, + "documentation": { + "docstring": "Get the device state. 
An exception means OFF state.", + "n_words": 9, + "vocab_size": 8, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 275788, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/preprocessing/text.py", + "file_name": "text.py", + "fun_name": "to_json", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def to_json(self, **kwargs):\n \n config = self.get_config()\n tokenizer_config = {\n \"class_name\": self.__class__.__name__,\n \"config\": config,\n }\n return json.dumps(tokenizer_config, **kwargs)\n\n\n@keras_export(\"keras.preprocessing.text.tokenizer_from_json\")", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "@keras_export(\"keras.preprocessing.text.tokenizer_from_json\")", + "n_ast_errors": 1, + "ast_levels": 10, + "n_whitespaces": 74, + "n_words": 18, + "vocab_size": 17, + "complexity": 1, + "nloc": 7, + "token_counts": 42, + "n_ast_nodes": 82, + "n_identifiers": 11, + "d_id": 81467, + "documentation": { + "docstring": "Returns a JSON string containing the tokenizer configuration.\n\n To load a tokenizer from a JSON string, use\n `keras.preprocessing.text.tokenizer_from_json(json_string)`.\n\n Args:\n **kwargs: Additional keyword arguments\n to be passed to `json.dumps()`.\n\n Returns:\n A JSON string containing the tokenizer configuration.\n ", + "n_words": 36, + "vocab_size": 25, + "n_whitespaces": 108, + "language": "en" + } + }, + { + "id": 207766, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "tests/admin_views/tests.py", + "file_name": "tests.py", + "fun_name": "test_delete_view", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def test_delete_view(self):\n \n delete_dict = {\"post\": \"yes\"}\n delete_url = reverse(\"admin:admin_views_article_delete\", args=(self.a1.pk,))\n\n # add user should not be able to delete articles\n self.client.force_login(self.adduser)\n response = self.client.get(delete_url)\n self.assertEqual(response.status_code, 403)\n post = self.client.post(delete_url, delete_dict)\n self.assertEqual(post.status_code, 403)\n self.assertEqual(Article.objects.count(), 3)\n self.client.logout()\n\n # view user should not be able to delete articles\n self.client.force_login(self.viewuser)\n response = self.client.get(delete_url)\n self.assertEqual(response.status_code, 403)\n post = self.client.post(delete_url, delete_dict)\n self.assertEqual(post.status_code, 403)\n self.assertEqual(Article.objects.count(), 3)\n self.client.logout()\n\n # Delete user can delete\n self.client.force_login(self.deleteuser)\n response = self.client.get(\n reverse(\"admin:admin_views_section_delete\", args=(self.s1.pk,))\n )\n self.assertContains(response, \"

    <h2>Summary</h2>\")\n self.assertContains(response, \"<li>Articles: 3</li>\")\n # test response contains link to related Article\n self.assertContains(response, \"admin_views/article/%s/\" % self.a1.pk)\n\n response = self.client.get(delete_url)\n self.assertContains(response, \"admin_views/article/%s/\" % self.a1.pk)\n self.assertContains(response, \"<h2>Summary</h2>\")\n self.assertContains(response, \"<li>Articles: 1</li>
    
  • \")\n post = self.client.post(delete_url, delete_dict)\n self.assertRedirects(post, self.index_url)\n self.assertEqual(Article.objects.count(), 2)\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, \"Greetings from a deleted object\")\n article_ct = ContentType.objects.get_for_model(Article)\n logged = LogEntry.objects.get(content_type=article_ct, action_flag=DELETION)\n self.assertEqual(logged.object_id, str(self.a1.pk))\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 410, + "n_words": 126, + "vocab_size": 71, + "complexity": 1, + "nloc": 36, + "token_counts": 387, + "n_ast_nodes": 624, + "n_identifiers": 40, + "d_id": 52091, + "documentation": { + "docstring": "Delete view should restrict access and actually delete items.", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 177869, + "commit_id": "d8d6a0554bfd263f8ce12ff3ce5a69986edd9bc0", + "repo": "label-studio", + "path": "label_studio/data_import/uploader.py", + "file_name": "uploader.py", + "fun_name": "tasks_from_url", + "commit_message": "fix: DEV-2361: Fix bandit check in LabelStudio Opensource (#2379)", + "code": "def tasks_from_url(file_upload_ids, project, request, url):\n \n # process URL with tasks\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n try:\n filename = url.rsplit('/', 1)[-1]\n with urlopen(url, context=ctx) as file: # nosec\n # check size\n meta = file.info()\n file.size = int(meta.get(\"Content-Length\"))\n file.urlopen = True\n check_file_sizes_and_number({url: file})\n file_content = file.read()\n if isinstance(file_content, str):\n file_content = file_content.encode()\n file_upload = create_file_upload(request, project, SimpleUploadedFile(filename, file_content))\n file_upload_ids.append(file_upload.id)\n tasks, found_formats, data_keys = FileUpload.load_tasks_from_uploaded_files(project, file_upload_ids)\n\n except ValidationError as e:\n raise e\n except Exception as e:\n raise ValidationError(str(e))\n return data_keys, found_formats, tasks, file_upload_ids\n\n", + "url": "https://github.com/heartexlabs/label-studio.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 266, + "n_words": 84, + "vocab_size": 62, + "complexity": 4, + "nloc": 22, + "token_counts": 179, + "n_ast_nodes": 291, + "n_identifiers": 40, + "d_id": 42529, + "documentation": { + "docstring": " Download file using URL and read tasks from it\n ", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 13, + "language": "en" + } + }, + { + "id": 271583, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/engine/training.py", + "file_name": "training.py", + "fun_name": "_multi_worker_concat", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def _multi_worker_concat(v, strategy):\n \n replicas = strategy.gather(v, axis=0)\n # v might not have the same shape on different replicas\n if _is_per_replica_instance(v):\n shapes = tf.concat(\n [\n tf.expand_dims(tf.shape(single_value)[0], axis=0)\n for single_value in v.values\n ],\n axis=0,\n )\n all_shapes = strategy.gather(shapes, axis=0)\n else:\n # v is a tensor. 
This may happen when, say, we have 2x1 multi-worker.\n all_shapes = strategy.gather(\n tf.expand_dims(tf.shape(v)[0], axis=0), axis=0\n )\n\n replicas = tf.split(\n replicas,\n num_or_size_splits=all_shapes,\n num=strategy.num_replicas_in_sync,\n )\n ordered_replicas = []\n num_replicas_per_worker = len(strategy.extended.worker_devices)\n for replica_id in range(num_replicas_per_worker):\n ordered_replicas += replicas[replica_id::num_replicas_per_worker]\n return concat(ordered_replicas)\n\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 258, + "n_words": 81, + "vocab_size": 62, + "complexity": 4, + "nloc": 25, + "token_counts": 161, + "n_ast_nodes": 248, + "n_identifiers": 26, + "d_id": 80811, + "documentation": { + "docstring": "Order PerReplica objects for CollectiveAllReduceStrategy and concat.", + "n_words": 7, + "vocab_size": 7, + "n_whitespaces": 6, + "language": "en" + } + }, + { + "id": 196041, + "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", + "repo": "sympy", + "path": "sympy/codegen/fnodes.py", + "file_name": "fnodes.py", + "fun_name": "shape", + "commit_message": "Updated import locations", + "code": "def shape(source, kind=None):\n \n return FunctionCall(\n 'shape',\n [_printable(source)] +\n ([_printable(kind)] if kind else [])\n )\n\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 44, + "n_words": 14, + "vocab_size": 14, + "complexity": 2, + "nloc": 6, + "token_counts": 36, + "n_ast_nodes": 59, + "n_identifiers": 5, + "d_id": 47541, + "documentation": { + "docstring": " Creates an AST node for a function call to Fortran's \"shape(...)\"\n\n Parameters\n ==========\n\n source : Symbol or String\n kind : expr\n\n Examples\n ========\n\n >>> from sympy import fcode\n >>> from sympy.codegen.fnodes import shape\n >>> shp = shape('x')\n >>> fcode(shp, source_format='free')\n 'shape(x)'\n\n ", + "n_words": 41, + "vocab_size": 35, + "n_whitespaces": 78, + "language": "en" + } + }, + { + "id": 177774, + "commit_id": "4ec4614e5e8b74795ecf8620e414f0340c6b94ef", + "repo": "label-studio", + "path": "label_studio/tests/test_predictions.py", + "file_name": "test_predictions.py", + "fun_name": "test_interactive_annotating_with_drafts", + "commit_message": "fix: DEV-2138: In interactive prediction only current user's draft should be sent (#2233)\n\n* fix: DEV-2138: In interactive prediction only current user's draft should be sent\r\n\r\n* Add test to check drafts in interactive prediction\r\n\r\nCo-authored-by: hlomzik ", + "code": "def test_interactive_annotating_with_drafts(business_client, configured_project):\n \n # create project with predefined task set\n ml_backend = configured_project.ml_backends.first()\n ml_backend.is_interactive = True\n ml_backend.save()\n\n users = list(User.objects.all())\n\n task = configured_project.tasks.first()\n AnnotationDraft.objects.create(task=task, user=users[0], result={}, lead_time=1)\n AnnotationDraft.objects.create(task=task, user=users[1], result={}, lead_time=2)\n # run prediction\n with requests_mock.Mocker(real_http=True) as m:\n m.register_uri('POST', f'{ml_backend.url}/predict', json={'results': [{'x': 'x'}]}, status_code=200)\n\n r = business_client.post(\n f'/api/ml/{ml_backend.pk}/interactive-annotating',\n data=json.dumps(\n {\n 'task': task.id,\n 'context': {'y': 'y'},\n }\n ),\n 
content_type=\"application/json\",\n )\n r.status_code = 200\n\n result = r.json()\n\n assert 'data' in result\n assert 'x' in result['data']\n assert result['data']['x'] == 'x'\n\n history = [req for req in m.request_history if 'predict' in req.path][0]\n assert history.text\n\n js = json.loads(history.text)\n\n assert len(js['tasks'][0]['drafts']) == 1", + "url": "https://github.com/heartexlabs/label-studio.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 18, + "n_whitespaces": 326, + "n_words": 97, + "vocab_size": 74, + "complexity": 3, + "nloc": 29, + "token_counts": 260, + "n_ast_nodes": 448, + "n_identifiers": 43, + "d_id": 42498, + "documentation": { + "docstring": "\n Test interactive annotating with drafts\n :param business_client:\n :param configured_project:\n :return:\n ", + "n_words": 10, + "vocab_size": 9, + "n_whitespaces": 26, + "language": "en" + } + }, + { + "id": 20072, + "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", + "repo": "pipenv", + "path": "pipenv/patched/notpip/_vendor/distro.py", + "file_name": "distro.py", + "fun_name": "_parse_distro_release_file", + "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", + "code": "def _parse_distro_release_file(self, filepath):\n # type: (str) -> Dict[str, str]\n \n try:\n with open(filepath) as fp:\n # Only parse the first line. For instance, on SLES there\n # are multiple lines. 
We don't want them...\n return self._parse_distro_release_content(fp.readline())\n except (OSError, IOError):\n # Ignore not being able to read a specific, seemingly version\n # related file.\n # See https://github.com/python-distro/distro/issues/162\n return {}\n", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 185, + "n_words": 57, + "vocab_size": 51, + "complexity": 2, + "nloc": 6, + "token_counts": 39, + "n_ast_nodes": 74, + "n_identifiers": 9, + "d_id": 3218, + "documentation": { + "docstring": "\n Parse a distro release file.\n\n Parameters:\n\n * filepath: Path name of the distro release file.\n\n Returns:\n A dictionary containing all information items.\n ", + "n_words": 22, + "vocab_size": 19, + "n_whitespaces": 69, + "language": "en" + } + }, + { + "id": 253601, + "commit_id": "19aba1f059efad45e1466d47954b2cf54d45b106", + "repo": "d2l-en", + "path": "d2l/mxnet.py", + "file_name": "mxnet.py", + "fun_name": "save_hyperparameters", + "commit_message": "simplify d2l lib", + "code": "def save_hyperparameters(self, ignore=[]):\n \n frame = inspect.currentframe().f_back\n _, _, _, local_vars = inspect.getargvalues(frame)\n self.hparams = {k:v for k, v in local_vars.items()\n if k not in set(ignore+['self']) and not k.startswith('_')}\n for k, v in self.hparams.items():\n setattr(self, k, v)\n", + "url": "https://github.com/d2l-ai/d2l-en.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 105, + "n_words": 36, + "vocab_size": 25, + "complexity": 5, + "nloc": 7, + "token_counts": 94, + "n_ast_nodes": 150, + "n_identifiers": 17, + "d_id": 74148, + "documentation": { + "docstring": "Save function arguments into class attributes.\n\n Defined in :numref:`sec_utils`", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 15, + "language": "en" + } + }, + { + "id": 74148, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/core/tests/test_blocks.py", + "file_name": "test_blocks.py", + "fun_name": "test_str_structvalue", + "commit_message": "Reformat with black", + "code": "def test_str_structvalue(self):\n \n block = SectionBlock()\n value = block.to_python({\"title\": \"Hello\", \"body\": \"italic world\"})\n result = str(value)\n self.assertNotIn(\"
    <h1>
    
    \", result)\n # The expected rendering should correspond to the native representation of an OrderedDict:\n # \"StructValue([('title', u'Hello'), ('body', )])\"\n # - give or take some quoting differences between Python versions\n self.assertIn(\"StructValue\", result)\n self.assertIn(\"title\", result)\n self.assertIn(\"Hello\", result)\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 132, + "n_words": 55, + "vocab_size": 48, + "complexity": 1, + "nloc": 8, + "token_counts": 65, + "n_ast_nodes": 123, + "n_identifiers": 10, + "d_id": 16224, + "documentation": { + "docstring": "\n The str() representation of a StructValue should NOT render the template, as that's liable\n to cause an infinite loop if any debugging / logging code attempts to log the fact that\n it rendered a template with this object in the context:\n https://github.com/wagtail/wagtail/issues/2874\n https://github.com/jazzband/django-debug-toolbar/issues/950\n ", + "n_words": 43, + "vocab_size": 39, + "n_whitespaces": 86, + "language": "en" + } + }, + { + "id": 251362, + "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", + "repo": "mitmproxy", + "path": "mitmproxy/flow.py", + "file_name": "flow.py", + "fun_name": "revert", + "commit_message": "make it black!", + "code": "def revert(self):\n \n if self._backup:\n self.set_state(self._backup)\n self._backup = None\n", + "url": "https://github.com/mitmproxy/mitmproxy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 44, + "n_words": 8, + "vocab_size": 8, + "complexity": 2, + "nloc": 4, + "token_counts": 24, + "n_ast_nodes": 42, + "n_identifiers": 4, + "d_id": 73697, + "documentation": { + "docstring": "\n Revert to the last backed up state.\n ", + "n_words": 7, + "vocab_size": 7, + "n_whitespaces": 22, + "language": "en" + } + }, + { + "id": 289375, + "commit_id": "31a787558fd312331b55e5c2c4b33341fc3601fc", + "repo": "core", + "path": "tests/components/history/test_init.py", + "file_name": "test_init.py", + "fun_name": "test_fetch_period_api_with_no_timestamp", + "commit_message": "Ensure recorder test fixture is setup before hass fixture (#80528)\n\n* Ensure recorder test fixture is setup before hass fixture\r\n\r\n* Adjust more tests", + "code": "async def test_fetch_period_api_with_no_timestamp(recorder_mock, hass, hass_client):\n \n await async_setup_component(hass, \"history\", {})\n client = await hass_client()\n response = await client.get(\"/api/history/period\")\n assert response.status == HTTPStatus.OK\n\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 36, + "n_words": 21, + "vocab_size": 18, + "complexity": 1, + "nloc": 5, + "token_counts": 43, + "n_ast_nodes": 75, + "n_identifiers": 11, + "d_id": 88517, + "documentation": { + "docstring": "Test the fetch period view for history with no timestamp.", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 271626, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/engine/training_arrays_test.py", + "file_name": "training_arrays_test.py", + "fun_name": "test_print_info_with_numpy", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def test_print_info_with_numpy(self, do_validation):\n \n\n 
model = keras.models.Sequential(\n [keras.layers.Dense(1, input_shape=(2,))]\n )\n model.compile(loss=\"mse\", optimizer=\"sgd\")\n\n dataset = np.arange(200).reshape(100, 2)\n\n if do_validation:\n val_data = (\n np.arange(100).reshape(50, 2),\n np.arange(50).reshape(50, 1),\n )\n else:\n val_data = None\n\n mock_stdout = io.StringIO()\n with tf.compat.v1.test.mock.patch.object(sys, \"stdout\", mock_stdout):\n model.fit(\n dataset, batch_size=10, epochs=2, validation_data=val_data\n )\n\n self.assertIn(\"Train on 100 samples\", mock_stdout.getvalue())\n\n if do_validation:\n self.assertIn(\", validate on 50 samples\", mock_stdout.getvalue())\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 254, + "n_words": 55, + "vocab_size": 43, + "complexity": 3, + "nloc": 21, + "token_counts": 175, + "n_ast_nodes": 280, + "n_identifiers": 35, + "d_id": 80835, + "documentation": { + "docstring": "Print training info should work with val datasets (b/133391839).", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 157317, + "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", + "repo": "stablediffusion", + "path": "ldm/models/diffusion/ddpm.py", + "file_name": "ddpm.py", + "fun_name": "q_mean_variance", + "commit_message": "release more models", + "code": "def q_mean_variance(self, x_start, t):\n \n mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)\n variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)\n log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)\n return mean, variance, log_variance\n", + "url": "https://github.com/Stability-AI/stablediffusion.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 62, + "n_words": 27, + "vocab_size": 20, + "complexity": 1, + "nloc": 5, + "token_counts": 66, + "n_ast_nodes": 94, + "n_identifiers": 12, + "d_id": 36898, + "documentation": { + "docstring": "\n Get the distribution q(x_t | x_0).\n :param x_start: the [N x C x ...] tensor of noiseless inputs.\n :param t: the number of diffusion steps (minus 1). Here, 0 means one step.\n :return: A tuple (mean, variance, log_variance), all of x_start's shape.\n ", + "n_words": 42, + "vocab_size": 36, + "n_whitespaces": 78, + "language": "en" + } + }, + { + "id": 242362, + "commit_id": "6be87277f71948bc7e4b945c46660cac3e5ce919", + "repo": "Pillow", + "path": "src/PIL/Image.py", + "file_name": "Image.py", + "fun_name": "getpalette", + "commit_message": "Allow rawmode None to return the palette in the current mode", + "code": "def getpalette(self, rawmode=\"RGB\"):\n \n\n self.load()\n try:\n mode = self.im.getpalettemode()\n except ValueError:\n return None # no palette\n if rawmode is None:\n rawmode = mode\n return list(self.im.getpalette(mode, rawmode))\n", + "url": "https://github.com/python-pillow/Pillow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 101, + "n_words": 25, + "vocab_size": 21, + "complexity": 3, + "nloc": 9, + "token_counts": 53, + "n_ast_nodes": 91, + "n_identifiers": 9, + "d_id": 69846, + "documentation": { + "docstring": "\n Returns the image palette as a list.\n\n :param rawmode: The mode in which to return the palette. 
``None`` will\n return the palette in its current mode.\n :returns: A list of color values [r, g, b, ...], or None if the\n image has no palette.\n ", + "n_words": 44, + "vocab_size": 36, + "n_whitespaces": 93, + "language": "en" + } + }, + { + "id": 321036, + "commit_id": "8eecf3af83fc9a4e465744a83e86856fe1c6df10", + "repo": "qutebrowser", + "path": "tests/unit/config/test_config.py", + "file_name": "test_config.py", + "fun_name": "test_get_mutable_invalid_value", + "commit_message": "config: Discard prior mutables before applying\n\nIf we only clear existing mutables *after* applying, we get into an\ninconsistent state if there was an error in one of the config values:\n\nThe improper value lingers around in self._mutables, and then gets\nreturned when get_mutable_obj() (or update_mutables()) gets called the\nnext time.\n\nReproducer:\n\n qutebrowser --debug --temp-basedir \\\n ':config-dict-add content.javascript.log_message.levels example.org bla' \\\n ':later 1000 config-dict-add content.javascript.log_message.levels example.org bla'\n\nResults in:\n\n ERROR: Invalid value 'bla' - expected a value of type list but got str.\n ERROR: example.org already exists in content.javascript.log_message - use --replace to overwrite!\n\nFixes the second part of #7343.\n\nnb: As before, the mutable updating actually gets interrupted by a\nfailing update, instead of it e.g. collecting all errors but carrying\non. With this change, the remaining updates will thus also be discarded,\nbut that does not seem to be a problem with how mutables are currently\nused. Ideally, we should get rid of the mutable handling entirely\nanyways, at least for qutebrowser internal code - see #4344.", + "code": "def test_get_mutable_invalid_value(self, conf):\n \n option = 'keyhint.blacklist'\n obj = conf.get_mutable_obj(option)\n assert obj == []\n obj.append(42)\n\n with pytest.raises(configexc.ValidationError):\n conf.update_mutables()\n\n obj = conf.get_mutable_obj(option)\n assert obj == []\n", + "url": "https://github.com/qutebrowser/qutebrowser.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 91, + "n_words": 24, + "vocab_size": 15, + "complexity": 1, + "nloc": 9, + "token_counts": 58, + "n_ast_nodes": 101, + "n_identifiers": 12, + "d_id": 117490, + "documentation": { + "docstring": "Make sure invalid values aren't stored in mutables.", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 278640, + "commit_id": "3613c3defc39c236fb1592c4f7ba1a9cc887343a", + "repo": "keras", + "path": "keras/callbacks.py", + "file_name": "callbacks.py", + "fun_name": "keras_model_summary", + "commit_message": "Remove pylint comments.\n\nPiperOrigin-RevId: 452353044", + "code": "def keras_model_summary(name, data, step=None):\n \n summary_metadata = tf.compat.v1.SummaryMetadata()\n # Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for\n # the rationale.\n summary_metadata.plugin_data.plugin_name = \"graph_keras_model\"\n # version number = 1\n summary_metadata.plugin_data.content = b\"1\"\n\n try:\n json_string = data.to_json()\n except Exception as exc:\n # An exception should not break a model code.\n logging.warning(\n \"Model failed to serialize as JSON. Ignoring... 
%s\", exc\n )\n return False\n\n with tf.summary.experimental.summary_scope(\n name, \"graph_keras_model\", [data, step]\n ) as (tag, _):\n with tf.device(\"cpu:0\"):\n tensor = tf.constant(json_string, dtype=tf.string)\n return tf.summary.write(\n tag=tag, tensor=tensor, step=step, metadata=summary_metadata\n )\n\n\n@keras_export(\"keras.callbacks.TensorBoard\", v1=[])", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "@keras_export(\"keras.callbacks.TensorBoard\", v1=[])", + "n_ast_errors": 1, + "ast_levels": 14, + "n_whitespaces": 215, + "n_words": 87, + "vocab_size": 71, + "complexity": 2, + "nloc": 19, + "token_counts": 133, + "n_ast_nodes": 239, + "n_identifiers": 31, + "d_id": 82645, + "documentation": { + "docstring": "Writes a Keras model as JSON to as a Summary.\n\n Writing the Keras model configuration allows the TensorBoard graph plugin to\n render a conceptual graph, as opposed to graph of ops. In case the model\n fails to serialize as JSON, it ignores and returns False.\n\n Args:\n name: A name for this summary. The summary tag used for TensorBoard will\n be this name prefixed by any active name scopes.\n data: A Keras Model to write.\n step: Explicit `int64`-castable monotonic step value for this summary. If\n omitted, this defaults to `tf.summary.experimental.get_step()`, which\n must not be None.\n\n Returns:\n True on success, or False if no summary was written because no default\n summary writer was available.\n\n Raises:\n ValueError: if a default writer exists, but no step was provided and\n `tf.summary.experimental.get_step()` is None.\n ", + "n_words": 128, + "vocab_size": 87, + "n_whitespaces": 207, + "language": "en" + } + }, + { + "id": 44415, + "commit_id": "6fc6edf6af7f676bfa54ff3a2e6e6d2edb938f2e", + "repo": "airflow", + "path": "airflow/jobs/backfill_job.py", + "file_name": "backfill_job.py", + "fun_name": "_update_counters", + "commit_message": "Make `airflow dags test` be able to execute Mapped Tasks (#21210)\n\n* Make `airflow dags test` be able to execute Mapped Tasks\r\n\r\nIn order to do this there were two steps required:\r\n\r\n- The BackfillJob needs to know about mapped tasks, both to expand them,\r\n and in order to update it's TI tracking\r\n- The DebugExecutor needed to \"unmap\" the mapped task to get the real\r\n operator back\r\n\r\nI was testing this with the following dag:\r\n\r\n```\r\nfrom airflow import DAG\r\nfrom airflow.decorators import task\r\nfrom airflow.operators.python import PythonOperator\r\nimport pendulum\r\n\r\n@task\r\ndef make_list():\r\n return list(map(lambda a: f'echo \"{a!r}\"', [1, 2, {'a': 'b'}]))\r\n\r\ndef consumer(*args):\r\n print(repr(args))\r\n\r\nwith DAG(dag_id='maptest', start_date=pendulum.DateTime(2022, 1, 18)) as dag:\r\n PythonOperator(task_id='consumer', python_callable=consumer).map(op_args=make_list())\r\n```\r\n\r\nIt can't \"unmap\" decorated operators successfully yet, so we're using\r\nold-school PythonOperator\r\n\r\nWe also just pass the whole value to the operator, not just the current\r\nmapping value(s)\r\n\r\n* Always have a `task_group` property on DAGNodes\r\n\r\nAnd since TaskGroup is a DAGNode, we don't need to store parent group\r\ndirectly anymore -- it'll already be stored\r\n\r\n* Add \"integation\" tests for running mapped tasks via BackfillJob\r\n\r\n* Only show \"Map Index\" in Backfill report when relevant\r\n\r\nCo-authored-by: Tzu-ping Chung ", + "code": "def _update_counters(self, ti_status, session=None):\n \n tis_to_be_scheduled = 
[]\n refreshed_tis = []\n TI = TaskInstance\n\n filter_for_tis = TI.filter_for_tis(list(ti_status.running.values()))\n if filter_for_tis is not None:\n refreshed_tis = session.query(TI).filter(filter_for_tis).all()\n\n for ti in refreshed_tis:\n # Here we remake the key by subtracting 1 to match in memory information\n reduced_key = ti.key.reduced\n if ti.state == TaskInstanceState.SUCCESS:\n ti_status.succeeded.add(reduced_key)\n self.log.debug(\"Task instance %s succeeded. Don't rerun.\", ti)\n ti_status.running.pop(reduced_key)\n continue\n if ti.state == TaskInstanceState.SKIPPED:\n ti_status.skipped.add(reduced_key)\n self.log.debug(\"Task instance %s skipped. Don't rerun.\", ti)\n ti_status.running.pop(reduced_key)\n continue\n if ti.state == TaskInstanceState.FAILED:\n self.log.error(\"Task instance %s failed\", ti)\n ti_status.failed.add(reduced_key)\n ti_status.running.pop(reduced_key)\n continue\n # special case: if the task needs to run again put it back\n if ti.state == TaskInstanceState.UP_FOR_RETRY:\n self.log.warning(\"Task instance %s is up for retry\", ti)\n ti_status.running.pop(reduced_key)\n ti_status.to_run[ti.key] = ti\n # special case: if the task needs to be rescheduled put it back\n elif ti.state == TaskInstanceState.UP_FOR_RESCHEDULE:\n self.log.warning(\"Task instance %s is up for reschedule\", ti)\n # During handling of reschedule state in ti._handle_reschedule, try number is reduced\n # by one, so we should not use reduced_key to avoid key error\n ti_status.running.pop(ti.key)\n ti_status.to_run[ti.key] = ti\n # special case: The state of the task can be set to NONE by the task itself\n # when it reaches concurrency limits. It could also happen when the state\n # is changed externally, e.g. by clearing tasks from the ui. We need to cover\n # for that as otherwise those tasks would fall outside of the scope of\n # the backfill suddenly.\n elif ti.state == State.NONE:\n self.log.warning(\n \"FIXME: task instance %s state was set to none externally or \"\n \"reaching concurrency limits. Re-adding task to queue.\",\n ti,\n )\n tis_to_be_scheduled.append(ti)\n ti_status.running.pop(reduced_key)\n ti_status.to_run[ti.key] = ti\n\n # Batch schedule of task instances\n if tis_to_be_scheduled:\n filter_for_tis = TI.filter_for_tis(tis_to_be_scheduled)\n session.query(TI).filter(filter_for_tis).update(\n values={TI.state: TaskInstanceState.SCHEDULED}, synchronize_session=False\n )\n session.flush()\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 1009, + "n_words": 279, + "vocab_size": 148, + "complexity": 10, + "nloc": 47, + "token_counts": 350, + "n_ast_nodes": 578, + "n_identifiers": 43, + "d_id": 8254, + "documentation": { + "docstring": "\n Updates the counters per state of the tasks that were running. 
Can re-add\n to tasks to run in case required.\n\n :param ti_status: the internal status of the backfill job tasks\n ", + "n_words": 30, + "vocab_size": 23, + "n_whitespaces": 59, + "language": "en" + } + }, + { + "id": 20223, + "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", + "repo": "pipenv", + "path": "pipenv/patched/notpip/_vendor/platformdirs/macos.py", + "file_name": "macos.py", + "fun_name": "user_config_dir", + "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", + "code": "def user_config_dir(self) -> str:\n \n return self._append_app_name_and_version(os.path.expanduser(\"~/Library/Preferences/\"))\n", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 20, + "n_words": 6, + "vocab_size": 6, + "complexity": 1, + "nloc": 3, + "token_counts": 22, + "n_ast_nodes": 40, + "n_identifiers": 7, + "d_id": 3275, + "documentation": { + "docstring": ":return: config directory tied to the user, e.g. ``~/Library/Preferences/$appname/$version``", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 65680, + "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", + "repo": "erpnext", + "path": "erpnext/controllers/stock_controller.py", + "file_name": "stock_controller.py", + "fun_name": "future_sle_exists", + "commit_message": "style: format code with black", + "code": "def future_sle_exists(args, sl_entries=None):\n\tkey = (args.voucher_type, args.voucher_no)\n\n\tif validate_future_sle_not_exists(args, key, sl_entries):\n\t\treturn False\n\telif get_cached_data(args, key):\n\t\treturn True\n\n\tif not sl_entries:\n\t\tsl_entries = get_sle_entries_against_voucher(args)\n\t\tif not sl_entries:\n\t\t\treturn\n\n\tor_conditions = get_conditions_to_validate_future_sle(sl_entries)\n\n\tdata = frappe.db.sql(\n\t\t.format(\n\t\t\t\" or \".join(or_conditions)\n\t\t),\n\t\targs,\n\t\tas_dict=1,\n\t)\n\n\tfor d in data:\n\t\tfrappe.local.future_sle[key][(d.item_code, d.warehouse)] = d.total_row\n\n\treturn len(data)\n\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 31, + "n_words": 52, + "vocab_size": 41, + "complexity": 6, + "nloc": 32, + "token_counts": 123, + "n_ast_nodes": 190, + "n_identifiers": 25, + "d_id": 13986, + "documentation": { + "docstring": "\n\t\tselect item_code, warehouse, count(name) as total_row\n\t\tfrom `tabStock Ledger Entry` force index (item_warehouse)\n\t\twhere\n\t\t\t({})\n\t\t\tand timestamp(posting_date, posting_time)\n\t\t\t\t>= timestamp(%(posting_date)s, %(posting_time)s)\n\t\t\tand voucher_no != %(voucher_no)s\n\t\t\tand is_cancelled = 0\n\t\tGROUP BY\n\t\t\titem_code, warehouse\n\t\t", + "n_words": 33, + "vocab_size": 30, + "n_whitespaces": 23, + "language": "en" + } + }, + { + "id": 90529, + "commit_id": "6307cf52c4c7f185f9023c6279e565dd7812c202", + "repo": 
"sentry", + "path": "src/sentry/snuba/metrics_performance.py", + "file_name": "metrics_performance.py", + "fun_name": "normalize_histogram_results", + "commit_message": "feat(mep): Adding histogram support to metrics enhanced perf (#34462)\n\n- This uses the metrics dataset to supply histogram data in the same\r\n format discover expects\r\n- Outlier is currently based on p25 and p75, may change to using tags\r\n later", + "code": "def normalize_histogram_results(fields, histogram_params, results):\n \n\n # zerofill and rename the columns while making sure to adjust for precision\n bucket_maps = {field: {} for field in fields}\n # Only one row in metrics result\n data = results[\"data\"][0]\n for field in fields:\n histogram_column = f\"histogram({field})\"\n histogram_alias = get_function_alias(histogram_column)\n bucket_maps[field] = {start: height for start, end, height in data[histogram_alias]}\n\n new_data = {field: [] for field in fields}\n for i in range(histogram_params.num_buckets):\n bucket = histogram_params.start_offset + histogram_params.bucket_size * i\n for field in fields:\n row = {\n \"bin\": bucket,\n \"count\": bucket_maps[field].get(bucket, 0),\n }\n # make sure to adjust for the precision if necessary\n if histogram_params.multiplier > 1:\n row[\"bin\"] /= float(histogram_params.multiplier)\n new_data[field].append(row)\n\n return new_data\n", + "url": "https://github.com/getsentry/sentry.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 269, + "n_words": 107, + "vocab_size": 69, + "complexity": 8, + "nloc": 19, + "token_counts": 157, + "n_ast_nodes": 249, + "n_identifiers": 25, + "d_id": 18673, + "documentation": { + "docstring": "\n Normalizes the histogram results by renaming the columns to key and bin\n and make sure to zerofill any missing values.\n\n :param [str] fields: The list of fields for which you want to generate the\n histograms for.\n :param str key_column: The column of the key name.\n :param HistogramParams histogram_params: The histogram parameters used.\n :param any results: The results from the histogram query that may be missing\n bins and needs to be normalized.\n :param str array_column: Array column prefix\n ", + "n_words": 77, + "vocab_size": 51, + "n_whitespaces": 116, + "language": "en" + } + }, + { + "id": 260053, + "commit_id": "a2c4d8b1f4471f52a4fcf1026f495e637a472568", + "repo": "scikit-learn", + "path": "sklearn/utils/sparsefuncs.py", + "file_name": "sparsefuncs.py", + "fun_name": "inplace_swap_column", + "commit_message": "DOC Ensures that inplace_swap_column passes numpydoc validation (#23476)\n\nCo-authored-by: Thomas J. Fan \r\nCo-authored-by: harshit5674 ", + "code": "def inplace_swap_column(X, m, n):\n \n if m < 0:\n m += X.shape[1]\n if n < 0:\n n += X.shape[1]\n if isinstance(X, sp.csc_matrix):\n inplace_swap_row_csr(X, m, n)\n elif isinstance(X, sp.csr_matrix):\n inplace_swap_row_csc(X, m, n)\n else:\n _raise_typeerror(X)\n\n", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 85, + "n_words": 32, + "vocab_size": 20, + "complexity": 5, + "nloc": 11, + "token_counts": 78, + "n_ast_nodes": 120, + "n_identifiers": 12, + "d_id": 76051, + "documentation": { + "docstring": "\n Swap two columns of a CSC/CSR matrix in-place.\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_features)\n Matrix whose two columns are to be swapped. 
It should be of\n CSR or CSC format.\n\n m : int\n Index of the column of X to be swapped.\n\n n : int\n Index of the column of X to be swapped.\n ", + "n_words": 58, + "vocab_size": 34, + "n_whitespaces": 108, + "language": "en" + } + }, + { + "id": 163192, + "commit_id": "521259299f7829da667ba39302ec77acedde9e5e", + "repo": "pandas", + "path": "pandas/core/indexes/category.py", + "file_name": "category.py", + "fun_name": "map", + "commit_message": "DOC: Improve doc summaries in series.rst (#45237)", + "code": "def map(self, mapper):\n \n mapped = self._values.map(mapper)\n return Index(mapped, name=self.name)\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 30, + "n_words": 9, + "vocab_size": 9, + "complexity": 1, + "nloc": 3, + "token_counts": 29, + "n_ast_nodes": 47, + "n_identifiers": 7, + "d_id": 39400, + "documentation": { + "docstring": "\n Map values using input an input mapping or function.\n\n Maps the values (their categories, not the codes) of the index to new\n categories. If the mapping correspondence is one-to-one the result is a\n :class:`~pandas.CategoricalIndex` which has the same order property as\n the original, otherwise an :class:`~pandas.Index` is returned.\n\n If a `dict` or :class:`~pandas.Series` is used any unmapped category is\n mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`\n will be returned.\n\n Parameters\n ----------\n mapper : function, dict, or Series\n Mapping correspondence.\n\n Returns\n -------\n pandas.CategoricalIndex or pandas.Index\n Mapped index.\n\n See Also\n --------\n Index.map : Apply a mapping correspondence on an\n :class:`~pandas.Index`.\n Series.map : Apply a mapping correspondence on a\n :class:`~pandas.Series`.\n Series.apply : Apply more complex functions on a\n :class:`~pandas.Series`.\n\n Examples\n --------\n >>> idx = pd.CategoricalIndex(['a', 'b', 'c'])\n >>> idx\n CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],\n ordered=False, dtype='category')\n >>> idx.map(lambda x: x.upper())\n CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],\n ordered=False, dtype='category')\n >>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})\n CategoricalIndex(['first', 'second', 'third'], categories=['first',\n 'second', 'third'], ordered=False, dtype='category')\n\n If the mapping is one-to-one the ordering of the categories is\n preserved:\n\n >>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)\n >>> idx\n CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],\n ordered=True, dtype='category')\n >>> idx.map({'a': 3, 'b': 2, 'c': 1})\n CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,\n dtype='category')\n\n If the mapping is not one-to-one an :class:`~pandas.Index` is returned:\n\n >>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})\n Index(['first', 'second', 'first'], dtype='object')\n\n If a `dict` is used, all unmapped categories are mapped to `NaN` and\n the result is an :class:`~pandas.Index`:\n\n >>> idx.map({'a': 'first', 'b': 'second'})\n Index(['first', 'second', nan], dtype='object')\n ", + "n_words": 256, + "vocab_size": 131, + "n_whitespaces": 734, + "language": "en" + } + }, + { + "id": 178013, + "commit_id": "583b3cb3b03a36a30b3ce9fe96eb4fb28548a070", + "repo": "label-studio", + "path": "label_studio/core/label_config.py", + "file_name": "label_config.py", + "fun_name": "get_original_fromname_by_regex", + "commit_message": "fix: DEV-1462: 
Fix changing label config for repeater tag (#2725)\n\n* fix: DEV-1462: Fix changing label config for repeater tag with created annotations", + "code": "def get_original_fromname_by_regex(config_string, fromname):\n \n c = parse_config(config_string)\n for control in c:\n item = c[control].get('regex', {})\n expression = control\n for key in item:\n expression = expression.replace(key, item[key])\n pattern = re.compile(expression)\n full_match = pattern.fullmatch(fromname)\n if full_match:\n return control\n return fromname\n\n", + "url": "https://github.com/heartexlabs/label-studio.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 113, + "n_words": 37, + "vocab_size": 26, + "complexity": 4, + "nloc": 12, + "token_counts": 77, + "n_ast_nodes": 123, + "n_identifiers": 16, + "d_id": 42571, + "documentation": { + "docstring": "\n Get from_name from config on from_name key from data after applying regex search or original fromname\n ", + "n_words": 16, + "vocab_size": 14, + "n_whitespaces": 23, + "language": "en" + } + }, + { + "id": 72652, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/api/v2/filters.py", + "file_name": "filters.py", + "fun_name": "filter_queryset", + "commit_message": "Reformat with black", + "code": "def filter_queryset(self, request, queryset, view):\n \n fields = set(view.get_available_fields(queryset.model, db_fields_only=True))\n\n # Locale is a database field, but we provide a separate filter for it\n if \"locale\" in fields:\n fields.remove(\"locale\")\n\n for field_name, value in request.GET.items():\n if field_name in fields:\n try:\n field = queryset.model._meta.get_field(field_name)\n except LookupError:\n field = None\n\n # Convert value into python\n try:\n if isinstance(\n field, (models.BooleanField, models.NullBooleanField)\n ):\n value = parse_boolean(value)\n elif isinstance(field, (models.IntegerField, models.AutoField)):\n value = int(value)\n elif isinstance(field, models.ForeignKey):\n value = field.target_field.get_prep_value(value)\n except ValueError as e:\n raise BadRequestError(\n \"field filter error. 
'%s' is not a valid value for %s (%s)\"\n % (value, field_name, str(e))\n )\n\n if isinstance(field, TaggableManager):\n for tag in value.split(\",\"):\n queryset = queryset.filter(**{field_name + \"__name\": tag})\n\n # Stick a message on the queryset to indicate that tag filtering has been performed\n # This will let the do_search method know that it must raise an error as searching\n # and tag filtering at the same time is not supported\n queryset._filtered_by_tag = True\n else:\n queryset = queryset.filter(**{field_name: value})\n\n return queryset\n\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 21, + "n_whitespaces": 758, + "n_words": 162, + "vocab_size": 108, + "complexity": 11, + "nloc": 31, + "token_counts": 220, + "n_ast_nodes": 359, + "n_identifiers": 39, + "d_id": 15909, + "documentation": { + "docstring": "\n This performs field level filtering on the result set\n Eg: ?title=James Joyce\n ", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 34, + "language": "en" + } + }, + { + "id": 91126, + "commit_id": "3535fa83a743f6967a92e0afdbba2b633236bf67", + "repo": "sentry", + "path": "src/sentry/snuba/metrics/utils.py", + "file_name": "utils.py", + "fun_name": "generate_operation_regex", + "commit_message": "fix(metrics): Restrict public name regex [TET-111] (#35305)\n\n* fix(metrics): Restrict public name regex\r\n\r\nRestricts the public name regex for metrics\r\nby separating out the public name regex\r\nand the mri name regex from the shared field\r\nregex and removing characters that are not\r\nexpected to be in public facing names\r\n\r\n* Update mri regex and public regex + add tests\r\n\r\n* Add better tests\r\n\r\n* PR comments", + "code": "def generate_operation_regex():\n \n operations = []\n for item in OP_TO_SNUBA_FUNCTION.values():\n operations += list(item.keys())\n return rf\"({'|'.join(map(str, operations))})\"\n\n\nOP_REGEX = generate_operation_regex()\n\n\nAVAILABLE_OPERATIONS = {\n type_: sorted(mapping.keys()) for type_, mapping in OP_TO_SNUBA_FUNCTION.items()\n}\nOPERATIONS_TO_ENTITY = {\n op: entity for entity, operations in AVAILABLE_OPERATIONS.items() for op in operations\n}\n\n# ToDo add guages/summaries\nMETRIC_TYPE_TO_ENTITY: Mapping[MetricType, EntityKey] = {\n \"counter\": EntityKey.MetricsCounters,\n \"set\": EntityKey.MetricsSets,\n \"distribution\": EntityKey.MetricsDistributions,\n}\n\nFIELD_ALIAS_MAPPINGS = {\"project\": \"project_id\"}\n\n", + "url": "https://github.com/getsentry/sentry.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 89, + "n_words": 64, + "vocab_size": 46, + "complexity": 2, + "nloc": 5, + "token_counts": 31, + "n_ast_nodes": 216, + "n_identifiers": 27, + "d_id": 18716, + "documentation": { + "docstring": "\n Generates a regex of all supported operations defined in OP_TO_SNUBA_FUNCTION\n ", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 17, + "language": "en" + } + }, + { + "id": 243053, + "commit_id": "c854bf8d1c05022bec4309fbf6b547e494db9373", + "repo": "Pillow", + "path": "src/PIL/ImageFont.py", + "file_name": "ImageFont.py", + "fun_name": "getbbox", + "commit_message": "add getbbox and getlength to basic ImageFont and update related tests", + "code": "def getbbox(self, text, *args, **kwargs):\n \n width, height = self.font.getsize(text)\n return 0, 0, width, height\n", + "url": "https://github.com/python-pillow/Pillow.git", + "language": "Python", + "ast_errors": 
"", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 35, + "n_words": 14, + "vocab_size": 11, + "complexity": 1, + "nloc": 3, + "token_counts": 34, + "n_ast_nodes": 52, + "n_identifiers": 9, + "d_id": 69963, + "documentation": { + "docstring": "\n Returns bounding box (in pixels) of given text.\n\n .. versionadded:: 9.2.0\n\n :param text: Text to render.\n :param mode: Used by some graphics drivers to indicate what mode the\n driver prefers; if empty, the renderer may return either\n mode. Note that the mode is always a string, to simplify\n C-level implementations.\n\n :return: ``(left, top, right, bottom)`` bounding box\n ", + "n_words": 57, + "vocab_size": 49, + "n_whitespaces": 160, + "language": "en" + } + }, + { + "id": 101720, + "commit_id": "e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1", + "repo": "faceswap", + "path": "tools/alignments/jobs.py", + "file_name": "jobs.py", + "fun_name": "_validate", + "commit_message": "Alignments Tool - Typing, Documentation + Re-org", + "code": "def _validate(self) -> None:\n \n if self._job == \"missing-frames\" and self._output == \"move\":\n logger.warning(\"Missing_frames was selected with move output, but there will \"\n \"be nothing to move. Defaulting to output: console\")\n self._output = \"console\"\n if self._type == \"faces\" and self._job != \"multi-faces\":\n logger.error(\"The selected folder is not valid. Faces folder (-fc) is only \"\n \"supported for 'multi-faces'\")\n sys.exit(1)\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 172, + "n_words": 57, + "vocab_size": 46, + "complexity": 5, + "nloc": 10, + "token_counts": 59, + "n_ast_nodes": 116, + "n_identifiers": 10, + "d_id": 21124, + "documentation": { + "docstring": " Check that the selected type is valid for selected task and job ", + "n_words": 12, + "vocab_size": 11, + "n_whitespaces": 13, + "language": "en" + } + }, + { + "id": 176718, + "commit_id": "2a05ccdb07cff88e56661dee8a9271859354027f", + "repo": "networkx", + "path": "networkx/algorithms/link_analysis/pagerank_alg.py", + "file_name": "pagerank_alg.py", + "fun_name": "pagerank_numpy", + "commit_message": "Remove redundant py2 numeric conversions (#5661)\n\n* Remove redundant float conversion\r\n\r\n* Remove redundant int conversion\r\n\r\n* Use integer division\r\n\r\nCo-authored-by: Miroslav Šedivý <6774676+eumiro@users.noreply.github.com>", + "code": "def pagerank_numpy(G, alpha=0.85, personalization=None, weight=\"weight\", dangling=None):\n \n msg = \"networkx.pagerank_numpy is deprecated and will be removed in NetworkX 3.0, use networkx.pagerank instead.\"\n warn(msg, DeprecationWarning, stacklevel=2)\n import numpy as np\n\n if len(G) == 0:\n return {}\n M = google_matrix(\n G, alpha, personalization=personalization, weight=weight, dangling=dangling\n )\n # use numpy LAPACK solver\n eigenvalues, eigenvectors = np.linalg.eig(M.T)\n ind = np.argmax(eigenvalues)\n # eigenvector of largest eigenvalue is at ind, normalized\n largest = np.array(eigenvectors[:, ind]).flatten().real\n norm = largest.sum()\n return dict(zip(G, map(float, largest / norm)))\n\n", + "url": "https://github.com/networkx/networkx.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 133, + "n_words": 77, + "vocab_size": 65, + "complexity": 2, + "nloc": 14, + "token_counts": 137, + "n_ast_nodes": 212, + "n_identifiers": 32, + "d_id": 42053, + "documentation": { + 
"docstring": "Returns the PageRank of the nodes in the graph.\n\n PageRank computes a ranking of the nodes in the graph G based on\n the structure of the incoming links. It was originally designed as\n an algorithm to rank web pages.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph. Undirected graphs will be converted to a directed\n graph with two directed edges for each undirected edge.\n\n alpha : float, optional\n Damping parameter for PageRank, default=0.85.\n\n personalization: dict, optional\n The \"personalization vector\" consisting of a dictionary with a\n key some subset of graph nodes and personalization value each of those.\n At least one personalization value must be non-zero.\n If not specfiied, a nodes personalization value will be zero.\n By default, a uniform distribution is used.\n\n weight : key, optional\n Edge data key to use as weight. If None weights are set to 1.\n\n dangling: dict, optional\n The outedges to be assigned to any \"dangling\" nodes, i.e., nodes without\n any outedges. The dict key is the node the outedge points to and the dict\n value is the weight of that outedge. By default, dangling nodes are given\n outedges according to the personalization vector (uniform if not\n specified) This must be selected to result in an irreducible transition\n matrix (see notes under google_matrix). It may be common to have the\n dangling dict to be the same as the personalization dict.\n\n Returns\n -------\n pagerank : dictionary\n Dictionary of nodes with PageRank as value.\n\n Examples\n --------\n >>> G = nx.DiGraph(nx.path_graph(4))\n >>> pr = nx.pagerank_numpy(G, alpha=0.9)\n\n Notes\n -----\n The eigenvector calculation uses NumPy's interface to the LAPACK\n eigenvalue solvers. This will be the fastest and most accurate\n for small graphs.\n\n This implementation works with Multi(Di)Graphs. For multigraphs the\n weight between two nodes is set to be the sum of all edge weights\n between those nodes.\n\n See Also\n --------\n pagerank, pagerank_scipy, google_matrix\n\n References\n ----------\n .. [1] A. Langville and C. Meyer,\n \"A survey of eigenvector methods of web information retrieval.\"\n http://citeseer.ist.psu.edu/713792.html\n .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,\n The PageRank citation ranking: Bringing order to the Web. 
1999\n http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf\n ", + "n_words": 344, + "vocab_size": 202, + "n_whitespaces": 556, + "language": "en" + } + }, + { + "id": 269365, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/applications/densenet.py", + "file_name": "densenet.py", + "fun_name": "decode_predictions", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def decode_predictions(preds, top=5):\n return imagenet_utils.decode_predictions(preds, top=top)\n\n\npreprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(\n mode=\"\",\n ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TORCH,\n error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,\n)\ndecode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__\n\nDOC = \n\nsetattr(DenseNet121, \"__doc__\", DenseNet121.__doc__ + DOC)\nsetattr(DenseNet169, \"__doc__\", DenseNet169.__doc__ + DOC)\nsetattr(DenseNet201, \"__doc__\", DenseNet201.__doc__ + DOC)\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 38, + "n_words": 33, + "vocab_size": 25, + "complexity": 1, + "nloc": 2, + "token_counts": 20, + "n_ast_nodes": 149, + "n_identifiers": 18, + "d_id": 80037, + "documentation": { + "docstring": "\n\n Reference:\n - [Densely Connected Convolutional Networks](\n https://arxiv.org/abs/1608.06993) (CVPR 2017)\n\n Optionally loads weights pre-trained on ImageNet.\n Note that the data format convention used by the model is\n the one specified in your Keras config at `~/.keras/keras.json`.\n\n Note: each Keras Application expects a specific kind of input preprocessing.\n For DenseNet, call `tf.keras.applications.densenet.preprocess_input` on your\n inputs before passing them to the model.\n\n Args:\n include_top: whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization),\n 'imagenet' (pre-training on ImageNet),\n or the path to the weights file to be loaded.\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: optional shape tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(224, 224, 3)` (with `'channels_last'` data format)\n or `(3, 224, 224)` (with `'channels_first'` data format).\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. `(200, 200, 3)` would be one valid value.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional block.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional block, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n classifier_activation: A `str` or callable. The activation function to use\n on the \"top\" layer. Ignored unless `include_top=True`. 
Set\n `classifier_activation=None` to return the logits of the \"top\" layer.\n When loading pretrained weights, `classifier_activation` can only\n be `None` or `\"softmax\"`.\n\n Returns:\n A Keras model instance.\n", + "n_words": 307, + "vocab_size": 181, + "n_whitespaces": 500, + "language": "en" + } + }, + { + "id": 320280, + "commit_id": "4aa318598fd0dc6c5d4e08dd2a13e7bf614511ec", + "repo": "paperless-ngx", + "path": "src/paperless_mail/tests/test_parsers.py", + "file_name": "test_parsers.py", + "fun_name": "test_tika_parse_unreachable", + "commit_message": "add test comments", + "code": "def test_tika_parse_unreachable(self):\n \n html = '
    Some Text
    '\n\n # Check if exception is raised when Tika cannot be reached.\n self.parser.tika_server = \"\"\n self.assertRaises(ParseError, self.parser.tika_parse, html)\n", + "url": "https://github.com/paperless-ngx/paperless-ngx.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 61, + "n_words": 26, + "vocab_size": 25, + "complexity": 1, + "nloc": 4, + "token_counts": 30, + "n_ast_nodes": 54, + "n_identifiers": 8, + "d_id": 117110, + "documentation": { + "docstring": "\n GIVEN:\n - Fresh start\n WHEN:\n - tika parsing is called but tika is not available\n THEN:\n - a ParseError Exception is thrown\n ", + "n_words": 22, + "vocab_size": 17, + "n_whitespaces": 84, + "language": "en" + } + }, + { + "id": 117210, + "commit_id": "7c02e15aa403a4ca1fa34489dd2df9136d6c961c", + "repo": "mindsdb", + "path": "tests/integration_tests/flows/test_mysql_api.py", + "file_name": "test_mysql_api.py", + "fun_name": "test_train_model_from_files", + "commit_message": "Projects structure (#3532)\n\nProjects structure", + "code": "def test_train_model_from_files(self):\n df = pd.DataFrame({\n 'x1': [x for x in range(100, 210)] + [x for x in range(100, 210)],\n 'x2': [x * 2 for x in range(100, 210)] + [x * 3 for x in range(100, 210)],\n 'y': [x * 3 for x in range(100, 210)] + [x * 2 for x in range(100, 210)]\n })\n file_predictor_name = \"predictor_from_file\"\n self.upload_ds(df, self.file_datasource_name)\n self.verify_file_ds(self.file_datasource_name)\n\n _query = f\n self.query(_query)\n self.check_predictor_readiness(file_predictor_name)\n", + "url": "https://github.com/mindsdb/mindsdb.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 155, + "n_words": 67, + "vocab_size": 29, + "complexity": 7, + "nloc": 16, + "token_counts": 142, + "n_ast_nodes": 228, + "n_identifiers": 14, + "d_id": 25923, + "documentation": { + "docstring": "\n CREATE MODEL {file_predictor_name}\n from files (select * from {self.file_datasource_name})\n predict y;\n ", + "n_words": 11, + "vocab_size": 10, + "n_whitespaces": 52, + "language": "en" + } + }, + { + "id": 320858, + "commit_id": "265b018c172f8c1f6d9e7f8850256363f0629f82", + "repo": "qutebrowser", + "path": "qutebrowser/browser/commands.py", + "file_name": "commands.py", + "fun_name": "_search_cb", + "commit_message": "Add a SearchMatch helper class", + "code": "def _search_cb(self, found, *, tab, old_match, options, text, prev):\n \n # :search/:search-next without reverse -> down\n # :search/:search-next with reverse -> up\n # :search-prev without reverse -> up\n # :search-prev with reverse -> down\n going_up = options['reverse'] ^ prev\n\n if found:\n if not config.val.search.wrap_messages:\n return\n\n # Check if the match count change is opposite to the search direction\n if old_match.current > 0:\n if not going_up:\n if old_match.current > tab.search.match.current:\n message.info(\"Search hit BOTTOM, continuing at TOP\",\n replace=\"search-hit-msg\")\n elif old_match.current == tab.search.match.current:\n message.info(\"Search hit BOTTOM\", replace=\"search-hit-msg\")\n elif going_up:\n if old_match.current < tab.search.match.current:\n message.info(\"Search hit TOP, continuing at BOTTOM\",\n replace=\"search-hit-msg\")\n elif old_match.current == tab.search.match.current:\n message.info(\"Search hit TOP\", replace=\"search-hit-msg\")\n else:\n message.warning(f\"Text '{text}' not found on page!\",\n replace='find-in-page')\n", + "url": 
"https://github.com/qutebrowser/qutebrowser.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 19, + "n_whitespaces": 546, + "n_words": 112, + "vocab_size": 62, + "complexity": 10, + "nloc": 21, + "token_counts": 161, + "n_ast_nodes": 276, + "n_identifiers": 19, + "d_id": 117399, + "documentation": { + "docstring": "Callback called from search/search_next/search_prev.\n\n Args:\n found: Whether the text was found.\n tab: The AbstractTab in which the search was made.\n old_match: The previously active search match before the search was\n performed.\n options: The options (dict) the search was made with.\n text: The text searched for.\n prev: Whether we're searching backwards (i.e. :search-prev)\n ", + "n_words": 52, + "vocab_size": 38, + "n_whitespaces": 154, + "language": "en" + } + }, + { + "id": 217383, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/fractions.py", + "file_name": "fractions.py", + "fun_name": "from_decimal", + "commit_message": "add python 3.10.4 for windows", + "code": "def from_decimal(cls, dec):\n \n from decimal import Decimal\n if isinstance(dec, numbers.Integral):\n dec = Decimal(int(dec))\n elif not isinstance(dec, Decimal):\n raise TypeError(\n \"%s.from_decimal() only takes Decimals, not %r (%s)\" %\n (cls.__name__, dec, type(dec).__name__))\n return cls(*dec.as_integer_ratio())\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 119, + "n_words": 32, + "vocab_size": 30, + "complexity": 3, + "nloc": 9, + "token_counts": 70, + "n_ast_nodes": 114, + "n_identifiers": 13, + "d_id": 54739, + "documentation": { + "docstring": "Converts a finite Decimal instance to a rational number, exactly.", + "n_words": 10, + "vocab_size": 9, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 244103, + "commit_id": "0932ab787d58eead15b5f823fbcca5351ceb90f7", + "repo": "mmdetection", + "path": "mmdet/models/plugins/pixel_decoder.py", + "file_name": "pixel_decoder.py", + "fun_name": "forward", + "commit_message": "add msdeformattn pixel decoder (#7466)\n\nfix typo\r\n\r\nrm img_metas\r\n\r\nrename in pixel_decoder\r\n\r\nupdate comments\r\n\r\nrename\r\n\r\nfix typo\r\n\r\ngenerae points with MlvlPointGenerator", + "code": "def forward(self, feats, img_metas):\n \n y = self.last_feat_conv(feats[-1])\n for i in range(self.num_inputs - 2, -1, -1):\n x = feats[i]\n cur_feat = self.lateral_convs[i](x)\n y = cur_feat + \\\n F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest')\n y = self.output_convs[i](y)\n\n mask_feature = self.mask_feature(y)\n memory = feats[-1]\n return mask_feature, memory\n\n\n@PLUGIN_LAYERS.register_module()", + "url": "https://github.com/open-mmlab/mmdetection.git", + "language": "Python", + "ast_errors": "@PLUGIN_LAYERS.register_module()", + "n_ast_errors": 1, + "ast_levels": 15, + "n_whitespaces": 142, + "n_words": 42, + "vocab_size": 32, + "complexity": 2, + "nloc": 11, + "token_counts": 113, + "n_ast_nodes": 186, + "n_identifiers": 22, + "d_id": 70240, + "documentation": { + "docstring": "\n Args:\n feats (list[Tensor]): Feature maps of each level. Each has\n shape of (batch_size, c, h, w).\n img_metas (list[dict]): List of image information. Pass in\n for creating more accurate padding mask. 
Not used here.\n\n Returns:\n tuple: a tuple containing the following:\n - mask_feature (Tensor): Shape (batch_size, c, h, w).\n - memory (Tensor): Output of last stage of backbone.\\\n Shape (batch_size, c, h, w).\n ", + "n_words": 62, + "vocab_size": 47, + "n_whitespaces": 200, + "language": "en" + } + }, + { + "id": 301136, + "commit_id": "3f8c896cb20526ef59e285b1f0eeb9b4e734efee", + "repo": "core", + "path": "tests/components/google/test_config_flow.py", + "file_name": "test_config_flow.py", + "fun_name": "primary_calendar_error", + "commit_message": "Set user friendly name for Google Calendar config entry (#72243)\n\n* Set user friendly name for Google Calendar config entry\r\n\r\n* Add a new auth implementation for use during the config flow", + "code": "async def primary_calendar_error() -> ClientError | None:\n \n return None\n\n\n@pytest.fixture(autouse=True)", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "@pytest.fixture(autouse=True)", + "n_ast_errors": 1, + "ast_levels": 7, + "n_whitespaces": 15, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 3, + "token_counts": 11, + "n_ast_nodes": 37, + "n_identifiers": 5, + "d_id": 99984, + "documentation": { + "docstring": "Fixture for tests to inject an error during calendar lookup.", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 137593, + "commit_id": "98fef7732852cdb3e9377cd87c1ee1085b894928", + "repo": "ray", + "path": "python/ray/tests/test_runtime_env.py", + "file_name": "test_runtime_env.py", + "fun_name": "test_get_release_wheel_url", + "commit_message": "[runtime env] Support python 3.10 for runtime_env conda (#30970)\n\nSigned-off-by: Archit Kulkarni \r\n\r\nconda environments are isolated, so when runtime_env sets up a conda environment it must download the Ray wheel into the conda environment. 
It must download the wheel that matches the current Python and Ray version running, otherwise there will be incompatibility issues between the workers that use this runtime_env and the other workers and Ray processes.\r\n\r\nThis PR updates the wheel name format logic to support Python 3.10.", + "code": "def test_get_release_wheel_url():\n \n # This should be a commit for which wheels have already been built for\n # all platforms and python versions at\n # `s3://ray-wheels/releases/2.2.0//`.\n test_commits = {\"2.2.0\": \"b6af0887ee5f2e460202133791ad941a41f15beb\"}\n for sys_platform in [\"darwin\", \"linux\", \"win32\"]:\n for py_version in ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS:\n for version, commit in test_commits.items():\n if sys_platform == \"win32\" and py_version == (3, 6):\n # Windows wheels are not built for py3.6 anymore\n continue\n url = get_release_wheel_url(commit, sys_platform, version, py_version)\n assert requests.head(url).status_code == 200, url\n\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 193, + "n_words": 74, + "vocab_size": 53, + "complexity": 6, + "nloc": 9, + "token_counts": 80, + "n_ast_nodes": 136, + "n_identifiers": 14, + "d_id": 31197, + "documentation": { + "docstring": "Test the code that generates the filenames of the `release` branch wheels.", + "n_words": 12, + "vocab_size": 10, + "n_whitespaces": 11, + "language": "en" + } + }, + { + "id": 45563, + "commit_id": "3aebb21c523c0eea0d4a1518d502ff95fd98011b", + "repo": "airflow", + "path": "airflow/models/connection.py", + "file_name": "connection.py", + "fun_name": "_validate_extra", + "commit_message": "Deprecate non-JSON conn.extra (#21816)\n\nConnection extra field is generally assumed to be JSON but we don't actually require it. Here we deprecate non-JSON extra so that in 3.0 we can require it. Further, we require that it not just be any json but must also parse as dict, because a string value such as '\"hi\"' or '[1,2,3]' is json, but a very bad practice.", + "code": "def _validate_extra(extra, conn_id) -> None:\n \n if extra is None:\n return None\n try:\n extra_parsed = json.loads(extra)\n if not isinstance(extra_parsed, dict):\n warnings.warn(\n \"Encountered JSON value in `extra` which does not parse as a dictionary in \"\n f\"connection {conn_id!r}. From Airflow 3.0, the `extra` field must contain a JSON \"\n \"representation of a Python dict.\",\n DeprecationWarning,\n stacklevel=3,\n )\n except json.JSONDecodeError:\n warnings.warn(\n f\"Encountered non-JSON in `extra` field for connection {conn_id!r}. Support for \"\n \"non-JSON `extra` will be removed in Airflow 3.0\",\n DeprecationWarning,\n stacklevel=2,\n )\n return None\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 357, + "n_words": 82, + "vocab_size": 59, + "complexity": 4, + "nloc": 25, + "token_counts": 74, + "n_ast_nodes": 132, + "n_identifiers": 13, + "d_id": 8648, + "documentation": { + "docstring": "\n Here we verify that ``extra`` is a JSON-encoded Python dict. 
From Airflow 3.0, we should no\n longer suppress these errors but raise instead.\n ", + "n_words": 23, + "vocab_size": 22, + "n_whitespaces": 46, + "language": "en" + } + }, + { + "id": 122898, + "commit_id": "9f811ba54d16d1acc4c0950dd3608d0b59d1af82", + "repo": "jax", + "path": "jax/_src/third_party/scipy/betaln.py", + "file_name": "betaln.py", + "fun_name": "algdiv", + "commit_message": "Address drastic slowdown in mypy runtime", + "code": "def algdiv(a, b):\n \n c0 = 0.833333333333333e-01\n c1 = -0.277777777760991e-02\n c2 = 0.793650666825390e-03\n c3 = -0.595202931351870e-03\n c4 = 0.837308034031215e-03\n c5 = -0.165322962780713e-02\n h = a / b\n c = h / (1 + h)\n x = h / (1 + h)\n d = b + (a - 0.5)\n # Set sN = (1 - x**n)/(1 - x)\n x2 = x * x\n s3 = 1.0 + (x + x2)\n s5 = 1.0 + (x + x2 * s3)\n s7 = 1.0 + (x + x2 * s5)\n s9 = 1.0 + (x + x2 * s7)\n s11 = 1.0 + (x + x2 * s9)\n # Set w = del(b) - del(a + b)\n # where del(x) is defined by ln(gamma(x)) = (x - 0.5)*ln(x) - x + 0.5*ln(2*pi) + del(x)\n t = (1.0 / b) ** 2\n w = ((((c5 * s11 * t + c4 * s9) * t + c3 * s7) * t + c2 * s5) * t + c1 * s3) * t + c0\n w = w * (c / b)\n # Combine the results\n u = d * lax.log1p(a / b)\n v = a * (lax.log(b) - 1.0)\n return jnp.where(u <= v, (w - v) - u, (w - u) - v)\n\n", + "url": "https://github.com/google/jax.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 21, + "n_whitespaces": 290, + "n_words": 209, + "vocab_size": 82, + "complexity": 1, + "nloc": 23, + "token_counts": 269, + "n_ast_nodes": 362, + "n_identifiers": 28, + "d_id": 27264, + "documentation": { + "docstring": "\n Compute ``log(gamma(a))/log(gamma(a + b))`` when ``b >= 8``.\n\n Derived from scipy's implmentation of `algdiv`_.\n\n This differs from the scipy implementation in that it assumes a <= b\n because recomputing ``a, b = jnp.minimum(a, b), jnp.maximum(a, b)`` might\n be expensive and this is only called by ``betaln``.\n\n .. 
_algdiv:\n https://github.com/scipy/scipy/blob/c89dfc2b90d993f2a8174e57e0cbc8fbe6f3ee19/scipy/special/cdflib/algdiv.f\n ", + "n_words": 49, + "vocab_size": 47, + "n_whitespaces": 78, + "language": "en" + } + }, + { + "id": 168150, + "commit_id": "6ba2a67556526db2e5b0b60a566b5f2039cf4a46", + "repo": "pandas", + "path": "pandas/core/indexes/frozen.py", + "file_name": "frozen.py", + "fun_name": "_disabled", + "commit_message": "TYP: annotate functions that always error with NoReturn (#48002)", + "code": "def _disabled(self, *args, **kwargs) -> NoReturn:\n \n raise TypeError(f\"'{type(self).__name__}' does not support mutable operations.\")\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 27, + "n_words": 13, + "vocab_size": 13, + "complexity": 1, + "nloc": 5, + "token_counts": 20, + "n_ast_nodes": 46, + "n_identifiers": 8, + "d_id": 40216, + "documentation": { + "docstring": "\n This method will not function because object is immutable.\n ", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 24, + "language": "en" + } + }, + { + "id": 100389, + "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", + "repo": "faceswap", + "path": "plugins/train/trainer/_base.py", + "file_name": "_base.py", + "fun_name": "_set_preview_feed", + "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", + "code": "def _set_preview_feed(self):\n \n retval = {}\n for idx, side in enumerate((\"a\", \"b\")):\n logger.debug(\"Setting preview feed: (side: '%s')\", side)\n preview_images = self._config.get(\"preview_images\", 14)\n preview_images = min(max(preview_images, 2), 16)\n batchsize = min(len(self._images[side]), preview_images)\n retval[side] = self._load_generator(idx).minibatch_ab(self._images[side],\n batchsize,\n side,\n do_shuffle=True,\n is_preview=True)\n logger.debug(\"Set preview feed. 
Batchsize: %s\", batchsize)\n return retval\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 395, + "n_words": 45, + "vocab_size": 38, + "complexity": 2, + "nloc": 14, + "token_counts": 116, + "n_ast_nodes": 184, + "n_identifiers": 20, + "d_id": 19874, + "documentation": { + "docstring": " Set the preview feed for this feeder.\n\n Creates a generator from :class:`lib.training_data.TrainingDataGenerator` specifically\n for previews for the feeder.\n\n Returns\n -------\n dict\n The side (\"a\" or \"b\") as key, :class:`~lib.training_data.TrainingDataGenerator` as\n value.\n ", + "n_words": 31, + "vocab_size": 26, + "n_whitespaces": 96, + "language": "en" + } + }, + { + "id": 118611, + "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", + "repo": "streamlit", + "path": "lib/tests/streamlit/forward_msg_queue_test.py", + "file_name": "forward_msg_queue_test.py", + "fun_name": "test_multiple_containers", + "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", + "code": "def test_multiple_containers(self):\n \n rq = ForwardMsgQueue()\n self.assertTrue(rq.is_empty())\n\n rq.enqueue(NEW_SESSION_MSG)\n", + "url": "https://github.com/streamlit/streamlit.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 35, + "n_words": 7, + "vocab_size": 7, + "complexity": 1, + "nloc": 13, + "token_counts": 114, + "n_ast_nodes": 49, + "n_identifiers": 8, + "d_id": 26316, + "documentation": { + "docstring": "Deltas should only be coalesced if they're in the same container", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 124273, + "commit_id": "89b0b82c13568254d179e63cb1a43b95fe632bd9", + "repo": "ray", + "path": "python/ray/serve/tests/test_json_serde.py", + "file_name": "test_json_serde.py", + "fun_name": "test_simple_class_node_json_serde", + "commit_message": "[Deployment Graph] Move `Deployment` creation outside to build function (#26129)", + "code": "def test_simple_class_node_json_serde(serve_instance):\n \n hello_actor = ClassHello.bind()\n original_dag_node = hello_actor.hello.bind()\n _test_deployment_json_serde_helper(\n original_dag_node,\n expected_num_deployments=1,\n )\n\n model_actor = Model.bind(1)\n original_dag_node = model_actor.forward.bind(1)\n _test_deployment_json_serde_helper(\n original_dag_node,\n expected_num_deployments=1,\n )\n\n model_actor = Model.bind(1, ratio=0.5)\n original_dag_node = model_actor.forward.bind(1)\n _test_deployment_json_serde_helper(\n original_dag_node,\n expected_num_deployments=1,\n )\n\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 114, + "n_words": 33, + "vocab_size": 16, + "complexity": 1, + "nloc": 19, + "token_counts": 91, + "n_ast_nodes": 142, + "n_identifiers": 13, + "d_id": 27557, + "documentation": { + "docstring": "\n Test the following behavior\n 1) Ray DAG node can go through full JSON serde cycle\n 2) Ray DAG node and deserialized DAG node produces same actor instances\n with same method call output\n 3) Ray DAG node can go through multiple rounds of JSON serde and still\n provides the same value as if it's 
only JSON serde once\n Against following test cases\n - Simple class with no args\n - Simple class with only args, all primitive types\n - Simple class with args + kwargs, all primitive types\n - Simple chain of class method calls, all primitive types\n ", + "n_words": 96, + "vocab_size": 54, + "n_whitespaces": 177, + "language": "en" + } + }, + { + "id": 322359, + "commit_id": "487162262196bead8d9b4c2306f313b8f64edf9b", + "repo": "PaddleNLP", + "path": "examples/text_summarization/prophetnet/evaluate/cnndm/bs_pyrouge.py", + "file_name": "bs_pyrouge.py", + "fun_name": "evaluate", + "commit_message": "Add model Prohetnet (#1698)\n\n* add Prohetnet model\r\n\r\n* update prohetnet\r\n\r\n* update format\r\n\r\n* pre commit\r\n\r\n* add prophetnet example\r\n\r\n* update tokenizer.py,run_train.sh,train_prophetnet.py\r\n\r\n* remove evaluate/gigaword/__init__.py\r\n\r\nCo-authored-by: smallv0221 <33639025+smallv0221@users.noreply.github.com>", + "code": "def evaluate(self, system_id=1, rouge_args=None):\n \n self.write_config(system_id=system_id)\n options = self.__get_options(rouge_args)\n command = [self._bin_path] + options\n self.log.info(\"Running ROUGE with command {}\".format(\" \".join(command)))\n rouge_output = check_output(command).decode(\"UTF-8\")\n return rouge_output\n", + "url": "https://github.com/PaddlePaddle/PaddleNLP.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 73, + "n_words": 24, + "vocab_size": 19, + "complexity": 1, + "nloc": 7, + "token_counts": 70, + "n_ast_nodes": 119, + "n_identifiers": 16, + "d_id": 118144, + "documentation": { + "docstring": "\n Run ROUGE to evaluate the system summaries in system_dir against\n the model summaries in model_dir. The summaries are assumed to\n be in the one-sentence-per-line HTML format ROUGE understands.\n\n system_id: Optional system ID which will be printed in\n ROUGE's output.\n\n Returns: Rouge output as string.\n\n ", + "n_words": 44, + "vocab_size": 33, + "n_whitespaces": 115, + "language": "en" + } + }, + { + "id": 209484, + "commit_id": "e6eaa484b8fa3d10051e82f5a784fe8dedbd5592", + "repo": "scapy", + "path": "scapy/contrib/automotive/scanner/executor.py", + "file_name": "executor.py", + "fun_name": "execute_test_case", + "commit_message": "Add assert to GMLAN Scanner to enforce fast fail on to many open TestSockets\n\nFix bugs in TestSocket\nFix bugs in the AutomotiveScanner execution_time handling\nSimplify test code for UDS_Scanner and reuse ObjectPipes to avoid mass\ncreation", + "code": "def execute_test_case(self, test_case, kill_time=None):\n # type: (AutomotiveTestCaseABC, Optional[float]) -> None\n \n\n test_case.pre_execute(\n self.socket, self.target_state, self.configuration)\n\n try:\n test_case_kwargs = self.configuration[test_case.__class__.__name__]\n except KeyError:\n test_case_kwargs = dict()\n\n if kill_time:\n max_execution_time = max(int(kill_time - time.time()), 5)\n cur_execution_time = test_case_kwargs.get(\"execution_time\", 1200)\n test_case_kwargs[\"execution_time\"] = min(max_execution_time,\n cur_execution_time)\n\n log_interactive.debug(\"[i] Execute test_case %s with args %s\",\n test_case.__class__.__name__, test_case_kwargs)\n\n test_case.execute(self.socket, self.target_state, **test_case_kwargs)\n test_case.post_execute(\n self.socket, self.target_state, self.configuration)\n\n self.check_new_states(test_case)\n self.check_new_testcases(test_case)\n", + "url": "https://github.com/secdev/scapy.git", + "language": "Python", + 
"ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 292, + "n_words": 57, + "vocab_size": 48, + "complexity": 3, + "nloc": 19, + "token_counts": 148, + "n_ast_nodes": 234, + "n_identifiers": 26, + "d_id": 52688, + "documentation": { + "docstring": "\n This function ensures the correct execution of a testcase, including\n the pre_execute, execute and post_execute.\n Finally the testcase is asked if a new edge or a new testcase was\n generated.\n\n :param test_case: A test case to be executed\n :param kill_time: If set, this defines the maximum execution time for\n the current test_case\n :return: None\n ", + "n_words": 54, + "vocab_size": 44, + "n_whitespaces": 136, + "language": "en" + } + }, + { + "id": 107061, + "commit_id": "da31ed386482845629a8505f81810ddb341514fb", + "repo": "matplotlib", + "path": "lib/matplotlib/widgets.py", + "file_name": "widgets.py", + "fun_name": "update", + "commit_message": "Fix drawing animated artists changed in selector callback", + "code": "def update(self):\n \n if not self.ax.get_visible() or self.ax.figure._cachedRenderer is None:\n return False\n if self.useblit:\n if self.background is not None:\n self.canvas.restore_region(self.background)\n else:\n self.update_background(None)\n for artist in self.artists + self._get_animated_artists():\n if artist.stale:\n self.ax.draw_artist(artist)\n self.canvas.blit(self.ax.bbox)\n else:\n self.canvas.draw_idle()\n return False\n", + "url": "https://github.com/matplotlib/matplotlib.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 200, + "n_words": 35, + "vocab_size": 26, + "complexity": 7, + "nloc": 15, + "token_counts": 108, + "n_ast_nodes": 177, + "n_identifiers": 19, + "d_id": 22575, + "documentation": { + "docstring": "Draw using blit() or draw_idle(), depending on ``self.useblit``.", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 152959, + "commit_id": "3c740dbfcdd69ddc3ab45a42be996e5c61104342", + "repo": "modin", + "path": "modin/core/dataframe/pandas/dataframe/dataframe.py", + "file_name": "dataframe.py", + "fun_name": "synchronize_labels", + "commit_message": "FEAT-#3111: Ensure relabeling Modin Frame does not lose partition shape (#3662)\n\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: Naren Krishna ", + "code": "def synchronize_labels(self, axis=None):\n \n if axis is None:\n self._deferred_index = True\n self._deferred_column = True\n elif axis == 0:\n self._deferred_index = True\n else:\n self._deferred_column = True\n", + "url": "https://github.com/modin-project/modin.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 96, + "n_words": 24, + "vocab_size": 15, + "complexity": 3, + "nloc": 8, + "token_counts": 42, + "n_ast_nodes": 70, + "n_identifiers": 5, + "d_id": 35205, + "documentation": { + "docstring": "\n Set the deferred axes variables for the ``PandasDataframe``.\n\n Parameters\n ----------\n axis : int, default: None\n The deferred axis.\n 0 for the index, 1 for the columns.\n ", + "n_words": 26, + "vocab_size": 20, + "n_whitespaces": 84, + "language": "en" + } + }, + { + "id": 181817, + "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", + "repo": "tpot", + "path": "tpot/base.py", + "file_name": "base.py", + "fun_name": "_impute_values", + "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", + "code": 
"def _impute_values(self, features):\n \n if self.verbosity > 1:\n print(\"Imputing missing values in feature set\")\n\n if self._fitted_imputer is None:\n self._fitted_imputer = SimpleImputer(strategy=\"median\")\n self._fitted_imputer.fit(features)\n\n return self._fitted_imputer.transform(features)\n", + "url": "https://github.com/EpistasisLab/tpot.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 84, + "n_words": 23, + "vocab_size": 21, + "complexity": 3, + "nloc": 7, + "token_counts": 53, + "n_ast_nodes": 91, + "n_identifiers": 10, + "d_id": 43601, + "documentation": { + "docstring": "Impute missing values in a feature set.\n\n Parameters\n ----------\n features: array-like {n_samples, n_features}\n A feature matrix\n\n Returns\n -------\n array-like {n_samples, n_features}\n ", + "n_words": 21, + "vocab_size": 17, + "n_whitespaces": 81, + "language": "en" + } + }, + { + "id": 122415, + "commit_id": "2416d154355f19e77b5c1ddf1de1f8552e4a98ad", + "repo": "jax", + "path": "jax/_src/third_party/numpy/linalg.py", + "file_name": "linalg.py", + "fun_name": "_multi_dot_three", + "commit_message": "Call _check_arraylike for jnp.linalg & jnp.fft functions", + "code": "def _multi_dot_three(A, B, C, precision):\n \n a0, a1b0 = A.shape\n b1c0, c1 = C.shape\n # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1\n cost1 = a0 * b1c0 * (a1b0 + c1)\n # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1\n cost2 = a1b0 * c1 * (a0 + b1c0)\n\n if cost1 < cost2:\n return jnp.dot(jnp.dot(A, B, precision=precision), C, precision=precision)\n else:\n return jnp.dot(A, jnp.dot(B, C, precision=precision), precision=precision)\n\n", + "url": "https://github.com/google/jax.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 79, + "n_words": 64, + "vocab_size": 39, + "complexity": 2, + "nloc": 9, + "token_counts": 103, + "n_ast_nodes": 155, + "n_identifiers": 14, + "d_id": 27182, + "documentation": { + "docstring": "\n Find the best order for three arrays and do the multiplication.\n For three arguments `_multi_dot_three` is approximately 15 times faster\n than `_multi_dot_matrix_chain_order`\n ", + "n_words": 22, + "vocab_size": 20, + "n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 135845, + "commit_id": "2ed09c54459cc3f74e2dab13406018698559856c", + "repo": "ray", + "path": "rllib/policy/tests/test_compute_log_likelihoods.py", + "file_name": "test_compute_log_likelihoods.py", + "fun_name": "test_dqn", + "commit_message": "[RLlib] Move all config validation logic into AlgorithmConfig classes. 
(#29854)", + "code": "def test_dqn(self):\n \n config = dqn.DQNConfig()\n # Soft-Q for DQN.\n config.exploration(exploration_config={\"type\": \"SoftQ\", \"temperature\": 0.5})\n config.debugging(seed=42)\n do_test_log_likelihood(dqn.DQN, config)\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 58, + "n_words": 16, + "vocab_size": 16, + "complexity": 1, + "nloc": 5, + "token_counts": 47, + "n_ast_nodes": 81, + "n_identifiers": 11, + "d_id": 30746, + "documentation": { + "docstring": "Tests, whether DQN correctly computes logp in soft-q mode.", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 223448, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/doctest.py", + "file_name": "doctest.py", + "fun_name": "_extract_future_flags", + "commit_message": "add python 3.10.4 for windows", + "code": "def _extract_future_flags(globs):\n \n flags = 0\n for fname in __future__.all_feature_names:\n feature = globs.get(fname, None)\n if feature is getattr(__future__, fname):\n flags |= feature.compiler_flag\n return flags\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 60, + "n_words": 23, + "vocab_size": 19, + "complexity": 3, + "nloc": 7, + "token_counts": 43, + "n_ast_nodes": 69, + "n_identifiers": 10, + "d_id": 56906, + "documentation": { + "docstring": "\n Return the compiler-flags associated with the future features that\n have been imported into the given namespace (globs).\n ", + "n_words": 17, + "vocab_size": 15, + "n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 72341, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/admin/tests/tests.py", + "file_name": "tests.py", + "fun_name": "test_send_html_email", + "commit_message": "Reformat with black", + "code": "def test_send_html_email(self):\n \n\n send_mail(\n \"Test HTML subject\",\n \"TEXT content\",\n [\"has.html@email.com\"],\n html_message=\"
    Test HTML content
    \",\n )\n send_mail(\"Test TEXT subject\", \"TEXT content\", [\"mr.plain.text@email.com\"])\n\n # Check that the emails were sent\n self.assertEqual(len(mail.outbox), 2)\n\n # check that the first email is the HTML email\n email_message = mail.outbox[0]\n self.assertEqual(email_message.subject, \"Test HTML subject\")\n self.assertEqual(\n email_message.alternatives, [(\"
    Test HTML content
    \", \"text/html\")]\n )\n self.assertEqual(\n email_message.body, \"TEXT content\"\n ) # note: plain text will always be added to body, even with alternatives\n self.assertEqual(email_message.to, [\"has.html@email.com\"])\n\n # confirm that without html_message kwarg we do not get 'alternatives'\n email_message = mail.outbox[1]\n self.assertEqual(email_message.subject, \"Test TEXT subject\")\n self.assertEqual(email_message.alternatives, [])\n self.assertEqual(email_message.body, \"TEXT content\")\n self.assertEqual(email_message.to, [\"mr.plain.text@email.com\"])\n\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 304, + "n_words": 97, + "vocab_size": 67, + "complexity": 1, + "nloc": 23, + "token_counts": 151, + "n_ast_nodes": 261, + "n_identifiers": 13, + "d_id": 15875, + "documentation": { + "docstring": "Test that the kwarg 'html_message' works as expected on send_mail by creating 'alternatives' on the EmailMessage object", + "n_words": 17, + "vocab_size": 15, + "n_whitespaces": 16, + "language": "en" + } + }, + { + "id": 68856, + "commit_id": "74a782d81d8f8c4a4d9214a9c06377e5e6e464dd", + "repo": "erpnext", + "path": "erpnext/selling/doctype/quotation/quotation.py", + "file_name": "quotation.py", + "fun_name": "set_expired_status", + "commit_message": "refactor: DB independent quoting and truthy/falsy values (#31358)\n\n* refactor: DB independent quoting and truthy/falsy values\r\n\r\n* style: reformat to black spec\r\n\r\n* fix: ifnull -> coalesce\r\n\r\n* fix: coalesce -> Coalesce\r\n\r\n* fix: revert pypika comparison\r\n\r\n* refactor: convert queries to QB\r\n\r\n* fix: incorrect value types for query\r\n\r\n`=` query makes no sense with list of values\r\n\r\n* fix: remove warehouse docstatus condition\r\n\r\n* fix: keep using base rate as rate\r\n\r\nCo-authored-by: Ankush Menat ", + "code": "def set_expired_status():\n\t# filter out submitted non expired quotations whose validity has been ended\n\tcond = \"`tabQuotation`.docstatus = 1 and `tabQuotation`.status != 'Expired' and `tabQuotation`.valid_till < %s\"\n\t# check if those QUO have SO against it\n\tso_against_quo = \n\n\t# if not exists any SO, set status as Expired\n\tfrappe.db.multisql(\n\t\t{\n\t\t\t\"mariadb\": .format(\n\t\t\t\tcond=cond, so_against_quo=so_against_quo\n\t\t\t),\n\t\t\t\"postgres\": .format(\n\t\t\t\tcond=cond, so_against_quo=so_against_quo\n\t\t\t),\n\t\t},\n\t\t(nowdate()),\n\t)\n\n\n@frappe.whitelist()", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "@frappe.whitelist()", + "n_ast_errors": 1, + "ast_levels": 12, + "n_whitespaces": 47, + "n_words": 64, + "vocab_size": 54, + "complexity": 1, + "nloc": 20, + "token_counts": 56, + "n_ast_nodes": 111, + "n_identifiers": 9, + "d_id": 14922, + "documentation": { + "docstring": "\n\t\tSELECT\n\t\t\tso.name FROM `tabSales Order` so, `tabSales Order Item` so_item\n\t\tWHERE\n\t\t\tso_item.docstatus = 1 and so.docstatus = 1\n\t\t\tand so_item.parent = so.name\n\t\t\tand so_item.prevdoc_docname = `tabQuotation`.nameUPDATE `tabQuotation` SET `tabQuotation`.status = 'Expired' WHERE {cond} and not exists({so_against_quo})UPDATE `tabQuotation` SET status = 'Expired' FROM `tabSales Order`, `tabSales Order Item` WHERE {cond} and not exists({so_against_quo})", + "n_words": 52, + "vocab_size": 28, + "n_whitespaces": 47, + "language": "en" + } + }, + { + "id": 65117, + "commit_id": 
"494bd9ef78313436f0424b918f200dab8fc7c20b", + "repo": "erpnext", + "path": "erpnext/accounts/general_ledger.py", + "file_name": "general_ledger.py", + "fun_name": "check_freezing_date", + "commit_message": "style: format code with black", + "code": "def check_freezing_date(posting_date, adv_adj=False):\n\t\n\tif not adv_adj:\n\t\tacc_frozen_upto = frappe.db.get_value(\"Accounts Settings\", None, \"acc_frozen_upto\")\n\t\tif acc_frozen_upto:\n\t\t\tfrozen_accounts_modifier = frappe.db.get_value(\n\t\t\t\t\"Accounts Settings\", None, \"frozen_accounts_modifier\"\n\t\t\t)\n\t\t\tif getdate(posting_date) <= getdate(acc_frozen_upto) and (\n\t\t\t\tfrozen_accounts_modifier not in frappe.get_roles() or frappe.session.user == \"Administrator\"\n\t\t\t):\n\t\t\t\tfrappe.throw(\n\t\t\t\t\t_(\"You are not authorized to add or update entries before {0}\").format(\n\t\t\t\t\t\tformatdate(acc_frozen_upto)\n\t\t\t\t\t)\n\t\t\t\t)\n\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 18, + "n_whitespaces": 37, + "n_words": 52, + "vocab_size": 41, + "complexity": 6, + "nloc": 15, + "token_counts": 92, + "n_ast_nodes": 157, + "n_identifiers": 16, + "d_id": 13794, + "documentation": { + "docstring": "\n\tNobody can do GL Entries where posting date is before freezing date\n\texcept authorized person\n\n\tAdministrator has all the roles so this check will be bypassed if any role is allowed to post\n\tHence stop admin to bypass if accounts are freezed\n\t", + "n_words": 42, + "vocab_size": 38, + "n_whitespaces": 38, + "language": "en" + } + }, + { + "id": 163539, + "commit_id": "2e5b05e5773f02b653d66373787b493bc3cf3abc", + "repo": "pandas", + "path": "pandas/tests/test_expressions.py", + "file_name": "test_expressions.py", + "fun_name": "test_run_binary", + "commit_message": "STYLE: use option_context almost always (#45407)", + "code": "def test_run_binary(self, df, flex, comparison_op):\n \n arith = comparison_op.__name__\n with option_context(\"compute.use_numexpr\", False):\n other = df.copy() + 1\n\n expr._MIN_ELEMENTS = 0\n expr.set_test_mode(True)\n\n result, expected = self.call_op(df, other, flex, arith)\n\n used_numexpr = expr.get_test_result()\n assert used_numexpr, \"Did not use numexpr as expected.\"\n tm.assert_equal(expected, result)\n\n # FIXME: dont leave commented-out\n # series doesn't uses vec_compare instead of numexpr...\n # for i in range(len(df.columns)):\n # binary_comp = other.iloc[:, i] + 1\n # self.run_binary(df.iloc[:, i], binary_comp, flex)\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 187, + "n_words": 70, + "vocab_size": 58, + "complexity": 1, + "nloc": 10, + "token_counts": 80, + "n_ast_nodes": 134, + "n_identifiers": 20, + "d_id": 39456, + "documentation": { + "docstring": "\n tests solely that the result is the same whether or not numexpr is\n enabled. 
Need to test whether the function does the correct thing\n elsewhere.\n ", + "n_words": 25, + "vocab_size": 20, + "n_whitespaces": 55, + "language": "en" + } + }, + { + "id": 73961, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/core/templatetags/wagtailcore_tags.py", + "file_name": "wagtailcore_tags.py", + "fun_name": "wagtail_site", + "commit_message": "Reformat with black", + "code": "def wagtail_site(context):\n \n try:\n request = context[\"request\"]\n except KeyError:\n return None\n\n return Site.find_for_request(request=request)\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 38, + "n_words": 12, + "vocab_size": 11, + "complexity": 2, + "nloc": 6, + "token_counts": 28, + "n_ast_nodes": 50, + "n_identifiers": 6, + "d_id": 16194, + "documentation": { + "docstring": "\n Returns the Site object for the given request\n ", + "n_words": 8, + "vocab_size": 7, + "n_whitespaces": 15, + "language": "en" + } + }, + { + "id": 128216, + "commit_id": "18b38c5e23fc54bf0baf7e7dbfcb07640e81f5ef", + "repo": "ray", + "path": "python/ray/serve/schema.py", + "file_name": "schema.py", + "fun_name": "get_user_configured_option_names", + "commit_message": "[Serve] Track user-configured options in Serve deployments (#28313)", + "code": "def get_user_configured_option_names(self) -> Set[str]:\n \n\n return {\n field for field, value in self.dict().items() if value is not DEFAULT.VALUE\n }\n\n\n@PublicAPI(stability=\"beta\")", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "@PublicAPI(stability=\"beta\")", + "n_ast_errors": 1, + "ast_levels": 11, + "n_whitespaces": 50, + "n_words": 19, + "vocab_size": 18, + "complexity": 3, + "nloc": 8, + "token_counts": 36, + "n_ast_nodes": 72, + "n_identifiers": 12, + "d_id": 28630, + "documentation": { + "docstring": "Get set of names for all user-configured options.\n\n Any field not set to DEFAULT.VALUE is considered a user-configured option.\n ", + "n_words": 19, + "vocab_size": 17, + "n_whitespaces": 33, + "language": "en" + } + }, + { + "id": 209791, + "commit_id": "a2b7a28faff1db058dd22ce097a268e0ad5d1d33", + "repo": "scapy", + "path": "scapy/arch/windows/__init__.py", + "file_name": "__init__.py", + "fun_name": "setmodulation", + "commit_message": "[Hinty] Core typing: windows (#3684)\n\n* Core typing: windows\r\n\r\nCo-authored-by: Pierre ", + "code": "def setmodulation(self, modu):\n # type: (int) -> bool\n \n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11 # noqa: E501\n self._check_npcap_requirement()\n _modus = {\n 0: \"dsss\",\n 1: \"fhss\",\n 2: \"irbaseband\",\n 3: \"ofdm\",\n 4: \"hrdss\",\n 5: \"erp\",\n 6: \"ht\",\n 7: \"vht\",\n 8: \"ihv\",\n 9: \"mimo-ofdm\",\n 10: \"mimo-ofdm\",\n }\n m = _modus.get(modu, \"unknown\") if isinstance(modu, int) else modu\n return self._npcap_set(\"modu\", str(m))\n\n", + "url": "https://github.com/secdev/scapy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 232, + "n_words": 54, + "vocab_size": 50, + "complexity": 2, + "nloc": 17, + "token_counts": 92, + "n_ast_nodes": 159, + "n_identifiers": 11, + "d_id": 52781, + "documentation": { + "docstring": "Set the interface modulation. 
It can be:\n - 0: dsss\n - 1: fhss\n - 2: irbaseband\n - 3: ofdm\n - 4: hrdss\n - 5: erp\n - 6: ht\n - 7: vht\n - 8: ihv\n - 9: mimo-ofdm\n - 10: mimo-ofdm\n - the value directly\n Only available with Npcap.", + "n_words": 48, + "vocab_size": 35, + "n_whitespaces": 174, + "language": "en" + } + }, + { + "id": 60680, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_internal/distributions/__init__.py", + "file_name": "__init__.py", + "fun_name": "make_distribution_for_install_requirement", + "commit_message": "upd; format", + "code": "def make_distribution_for_install_requirement(install_req):\n # type: (InstallRequirement) -> AbstractDistribution\n \n # Editable requirements will always be source distributions. They use the\n # legacy logic until we create a modern standard for them.\n if install_req.editable:\n return SourceDistribution(install_req)\n\n # If it's a wheel, it's a WheelDistribution\n if install_req.is_wheel:\n return WheelDistribution(install_req)\n\n # Otherwise, a SourceDistribution\n return SourceDistribution(install_req)\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 92, + "n_words": 51, + "vocab_size": 39, + "complexity": 3, + "nloc": 6, + "token_counts": 31, + "n_ast_nodes": 58, + "n_identifiers": 6, + "d_id": 12243, + "documentation": { + "docstring": "Returns a Distribution for the given InstallRequirement", + "n_words": 7, + "vocab_size": 7, + "n_whitespaces": 6, + "language": "en" + } + }, + { + "id": 46606, + "commit_id": "b5a786b38148295c492da8ab731d5e2f6f86ccf7", + "repo": "airflow", + "path": "airflow/providers_manager.py", + "file_name": "providers_manager.py", + "fun_name": "_discover_all_providers_from_packages", + "commit_message": "Suppress import errors for providers from sources (#22579)\n\nWhen we are running airflow locally with providers installed from sources, often many providers will be discovered which we haven't installed the deps for. This generally results in a very large amount of traceback logging, which has a very negative effect on usefulness of terminal output. Here we suppress this error logging for providers that are installed from sources.", + "code": "def _discover_all_providers_from_packages(self) -> None:\n \n for entry_point, dist in entry_points_with_dist('apache_airflow_provider'):\n package_name = dist.metadata['name']\n if self._provider_dict.get(package_name) is not None:\n continue\n log.debug(\"Loading %s from package %s\", entry_point, package_name)\n version = dist.version\n provider_info = entry_point.load()()\n self._provider_schema_validator.validate(provider_info)\n provider_info_package_name = provider_info['package-name']\n if package_name != provider_info_package_name:\n raise Exception(\n f\"The package '{package_name}' from setuptools and \"\n f\"{provider_info_package_name} do not match. 
Please make sure they are aligned\"\n )\n if package_name not in self._provider_dict:\n self._provider_dict[package_name] = ProviderInfo(version, provider_info, 'package')\n else:\n log.warning(\n \"The provider for package '%s' could not be registered from because providers for that \"\n \"package name have already been registered\",\n package_name,\n )\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 403, + "n_words": 94, + "vocab_size": 72, + "complexity": 5, + "nloc": 31, + "token_counts": 126, + "n_ast_nodes": 221, + "n_identifiers": 20, + "d_id": 8934, + "documentation": { + "docstring": "\n Discovers all providers by scanning packages installed. The list of providers should be returned\n via the 'apache_airflow_provider' entrypoint as a dictionary conforming to the\n 'airflow/provider_info.schema.json' schema. Note that the schema is different at runtime\n than provider.yaml.schema.json. The development version of provider schema is more strict and changes\n together with the code. The runtime version is more relaxed (allows for additional properties)\n and verifies only the subset of fields that are needed at runtime.\n ", + "n_words": 73, + "vocab_size": 55, + "n_whitespaces": 123, + "language": "en" + } + }, + { + "id": 251191, + "commit_id": "fdde9ba3b3caaa2654048cec0af07bfcc3a6a3f8", + "repo": "mitmproxy", + "path": "mitmproxy/addons/modifybody.py", + "file_name": "modifybody.py", + "fun_name": "load", + "commit_message": "use Python 3.9+ typing", + "code": "def load(self, loader):\n loader.add_option(\n \"modify_body\", Sequence[str], [],\n \n )\n", + "url": "https://github.com/mitmproxy/mitmproxy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 44, + "n_words": 8, + "vocab_size": 8, + "complexity": 1, + "nloc": 9, + "token_counts": 23, + "n_ast_nodes": 37, + "n_identifiers": 6, + "d_id": 73629, + "documentation": { + "docstring": "\n Replacement pattern of the form \"[/flow-filter]/regex/[@]replacement\", where\n the separator can be any character. The @ allows to provide a file path that\n is used to read the replacement string.\n ", + "n_words": 29, + "vocab_size": 26, + "n_whitespaces": 74, + "language": "en" + } + }, + { + "id": 140298, + "commit_id": "eaed256d6863c529b8ada42514f7fba12d146f22", + "repo": "ray", + "path": "rllib/execution/parallel_requests.py", + "file_name": "parallel_requests.py", + "fun_name": "get_ready", + "commit_message": "[RLlib] Async parallel execution manager. 
(#24423)", + "code": "def get_ready(self) -> Dict[ActorHandle, List[Any]]:\n \n ready_requests_dict = defaultdict(list)\n ready_requests, self._pending_remotes = ray.wait(\n self._pending_remotes,\n timeout=self._ray_wait_timeout_s,\n num_returns=len(self._pending_remotes),\n )\n if not self._return_object_refs:\n objs = ray.get(ready_requests)\n else:\n objs = ready_requests\n for req, obj in zip(ready_requests, objs):\n actor = self._pending_to_actor[req]\n self._remote_requests_in_flight[actor].remove(req)\n ready_requests_dict[actor].append(obj)\n del self._pending_to_actor[req]\n del ready_requests\n return dict(ready_requests_dict)\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 205, + "n_words": 43, + "vocab_size": 35, + "complexity": 3, + "nloc": 26, + "token_counts": 125, + "n_ast_nodes": 193, + "n_identifiers": 29, + "d_id": 31919, + "documentation": { + "docstring": "Get results that are ready to be returned\n\n Returns:\n A dictionary of actor handles to lists of returns from tasks that were\n previously submitted to this actor pool that are now ready to be returned.\n If return_object_refs\n\n ", + "n_words": 37, + "vocab_size": 27, + "n_whitespaces": 86, + "language": "en" + } + }, + { + "id": 222578, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/distutils/ccompiler.py", + "file_name": "ccompiler.py", + "fun_name": "set_link_objects", + "commit_message": "add python 3.10.4 for windows", + "code": "def set_link_objects(self, objects):\n \n self.objects = objects[:]\n\n\n # -- Private utility methods --------------------------------------\n # (here for the convenience of subclasses)\n\n # Helper method to prep compiler in subclass compile() methods\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 52, + "n_words": 29, + "vocab_size": 26, + "complexity": 1, + "nloc": 2, + "token_counts": 16, + "n_ast_nodes": 31, + "n_identifiers": 3, + "d_id": 56651, + "documentation": { + "docstring": "Set the list of object files (or analogues) to be included in\n every link to 'objects'. This does not affect any standard object\n files that the linker may include by default (such as system\n libraries).\n ", + "n_words": 35, + "vocab_size": 31, + "n_whitespaces": 64, + "language": "en" + } + }, + { + "id": 126155, + "commit_id": "4ab97399cda80be1e146946c43d6fb2926248b28", + "repo": "ray", + "path": "dashboard/modules/serve/serve_agent.py", + "file_name": "serve_agent.py", + "fun_name": "get_serve_client", + "commit_message": "[Serve] Only start Serve in the CLI through the `serve deploy` command (#27063)\n\nThese Serve CLI commands start Serve if it's not already running:\r\n\r\n* `serve deploy`\r\n* `serve config`\r\n* `serve status`\r\n* `serve shutdown`\r\n\r\n#27026 introduces the ability to specify a `host` and `port` in the Serve config file. However, once Serve starts running, changing these options requires tearing down the entire Serve application and relaunching it. This limitation is an issue because users can inadvertently start Serve by running one of the `GET`-based CLI commands (i.e. `serve config` or `serve status`) before running `serve deploy`.\r\n\r\nThis change makes `serve deploy` the only CLI command that can start a Serve application on a Ray cluster. 
The other commands have updated behavior when Serve is not yet running on the cluster.\r\n\r\n* `serve config`: prints an empty config body.\r\n\r\n```yaml\r\nimport_path: ''\r\nruntime_env: {}\r\ndeployments: []\r\n```\r\n\r\n* `serve status`: prints an empty status body, with a new `app_status` `status` value: `NOT_STARTED`.\r\n\r\n```yaml\r\napp_status:\r\n status: NOT_STARTED\r\n message: ''\r\n deployment_timestamp: 0\r\ndeployment_statuses: []\r\n```\r\n\r\n* `serve shutdown`: performs a no-op.", + "code": "def get_serve_client(self):\n \n\n from ray.serve.context import get_global_client\n from ray.serve.exceptions import RayServeException\n\n try:\n return get_global_client(_health_check_controller=True)\n except RayServeException:\n logger.debug(\"There's no Serve app running on this Ray cluster.\")\n return None\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 94, + "n_words": 26, + "vocab_size": 23, + "complexity": 2, + "nloc": 8, + "token_counts": 42, + "n_ast_nodes": 70, + "n_identifiers": 11, + "d_id": 28078, + "documentation": { + "docstring": "Gets the ServeControllerClient to the this cluster's Serve app.\n\n return: If Serve is running on this Ray cluster, returns a client to\n the Serve controller. If Serve is not running, returns None.\n ", + "n_words": 32, + "vocab_size": 22, + "n_whitespaces": 57, + "language": "en" + } + }, + { + "id": 49439, + "commit_id": "9b3119dfb63c4cbb7acfb9f1f1c09ac24e6d68d2", + "repo": "PaddleHub", + "path": "modules/image/text_recognition/ppocrv3_det_ch/processor.py", + "file_name": "processor.py", + "fun_name": "resize_image_type0", + "commit_message": "add module", + "code": "def resize_image_type0(self, img):\n \n limit_side_len = self.max_side_len\n h, w, _ = img.shape\n\n # limit the max side\n if max(h, w) > limit_side_len:\n if h > w:\n ratio = float(limit_side_len) / h\n else:\n ratio = float(limit_side_len) / w\n else:\n ratio = 1.\n resize_h = int(h * ratio)\n resize_w = int(w * ratio)\n\n resize_h = int(round(resize_h / 32) * 32)\n resize_w = int(round(resize_w / 32) * 32)\n\n try:\n if int(resize_w) <= 0 or int(resize_h) <= 0:\n return None, (None, None)\n img = cv2.resize(img, (int(resize_w), int(resize_h)))\n except:\n print(img.shape, resize_w, resize_h)\n sys.exit(0)\n ratio_h = resize_h / float(h)\n ratio_w = resize_w / float(w)\n # return img, np.array([h, w])\n return img, [ratio_h, ratio_w]\n", + "url": "https://github.com/PaddlePaddle/PaddleHub.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 340, + "n_words": 106, + "vocab_size": 66, + "complexity": 6, + "nloc": 24, + "token_counts": 190, + "n_ast_nodes": 302, + "n_identifiers": 23, + "d_id": 9744, + "documentation": { + "docstring": "\n resize image to a size multiple of 32 which is required by the network\n args:\n img(array): array with shape [h, w, c]\n return(tuple):\n img, (ratio_h, ratio_w)\n ", + "n_words": 26, + "vocab_size": 26, + "n_whitespaces": 77, + "language": "en" + } + }, + { + "id": 247312, + "commit_id": "2ffaf30803f93273a4d8a65c9e6c3110c8433488", + "repo": "synapse", + "path": "tests/rest/client/test_rooms.py", + "file_name": "test_rooms.py", + "fun_name": "test_finds_message", + "commit_message": "Add type hints to `tests/rest/client` (#12108)\n\n* Add type hints to `tests/rest/client`\r\n\r\n* newsfile\r\n\r\n* fix imports\r\n\r\n* add `test_account.py`\r\n\r\n* Remove one type 
hint in `test_report_event.py`\r\n\r\n* change `on_create_room` to `async`\r\n\r\n* update new functions in `test_third_party_rules.py`\r\n\r\n* Add `test_filter.py`\r\n\r\n* add `test_rooms.py`\r\n\r\n* change to `assertEquals` to `assertEqual`\r\n\r\n* lint", + "code": "def test_finds_message(self) -> None:\n \n # The other user sends some messages\n self.helper.send(self.room, body=\"Hi!\", tok=self.other_access_token)\n self.helper.send(self.room, body=\"There!\", tok=self.other_access_token)\n\n channel = self.make_request(\n \"POST\",\n \"/search?access_token=%s\" % (self.access_token,),\n {\n \"search_categories\": {\n \"room_events\": {\"keys\": [\"content.body\"], \"search_term\": \"Hi\"}\n }\n },\n )\n\n # Check we get the results we expect -- one search result, of the sent\n # messages\n self.assertEqual(channel.code, 200)\n results = channel.json_body[\"search_categories\"][\"room_events\"]\n self.assertEqual(results[\"count\"], 1)\n self.assertEqual(results[\"results\"][0][\"result\"][\"content\"][\"body\"], \"Hi!\")\n\n # No context was requested, so we should get none.\n self.assertEqual(results[\"results\"][0][\"context\"], {})\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 264, + "n_words": 73, + "vocab_size": 60, + "complexity": 1, + "nloc": 21, + "token_counts": 159, + "n_ast_nodes": 277, + "n_identifiers": 15, + "d_id": 71583, + "documentation": { + "docstring": "\n The search functionality will search for content in messages if asked to\n do so.\n ", + "n_words": 14, + "vocab_size": 13, + "n_whitespaces": 36, + "language": "en" + } + }, + { + "id": 100443, + "commit_id": "aa39234538a8f83e6aa2b60b8275a570e8876ac2", + "repo": "faceswap", + "path": "plugins/extract/detect/s3fd.py", + "file_name": "s3fd.py", + "fun_name": "conv_block", + "commit_message": "Update all Keras Imports to be conditional (#1214)\n\n* Remove custom keras importer\r\n\r\n* first round keras imports fix\r\n\r\n* launcher.py: Remove KerasFinder references\r\n\r\n* 2nd round keras imports update (lib and extract)\r\n\r\n* 3rd round keras imports update (train)\r\n\r\n* remove KerasFinder from tests\r\n\r\n* 4th round keras imports update (tests)", + "code": "def conv_block(cls, inputs, filters, idx, recursions):\n \n name = f\"conv{idx}\"\n var_x = inputs\n for i in range(1, recursions + 1):\n rec_name = f\"{name}_{i}\"\n var_x = ZeroPadding2D(1, name=f\"{rec_name}.zeropad\")(var_x)\n var_x = Conv2D(filters,\n kernel_size=3,\n strides=1,\n activation=\"relu\",\n name=rec_name)(var_x)\n return var_x\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 207, + "n_words": 35, + "vocab_size": 28, + "complexity": 2, + "nloc": 12, + "token_counts": 78, + "n_ast_nodes": 135, + "n_identifiers": 16, + "d_id": 19921, + "documentation": { + "docstring": " First round convolutions with zero padding added.\n\n Parameters\n ----------\n inputs: tensor\n The input tensor to the convolution block\n filters: int\n The number of filters\n idx: int\n The layer index for naming\n recursions: int\n The number of recursions of the block to perform\n\n Returns\n -------\n tensor\n The output tensor from the convolution block\n ", + "n_words": 52, + "vocab_size": 34, + "n_whitespaces": 178, + "language": "en" + } + }, + { + "id": 119172, + "commit_id": "2388e353da9768a5e714a83b360bac1e920ff7ae", + "repo": 
"jax", + "path": "build/build.py", + "file_name": "build.py", + "fun_name": "get_bazel_path", + "commit_message": "Increase bazel version to 5.0.0 to match TensorFlow\n(https://github.com/tensorflow/tensorflow/commit/8871926b0aa9d5b0e819d12f6945bce752fde610).", + "code": "def get_bazel_path(bazel_path_flag):\n \n for path in filter(None, get_bazel_paths(bazel_path_flag)):\n version = get_bazel_version(path)\n if version is not None and version >= (5, 0, 0):\n return path, \".\".join(map(str, version))\n\n print(\"Cannot find or download a suitable version of bazel.\"\n \"Please install bazel >= 5.0.0.\")\n sys.exit(-1)\n\n", + "url": "https://github.com/google/jax.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 62, + "n_words": 40, + "vocab_size": 36, + "complexity": 4, + "nloc": 8, + "token_counts": 67, + "n_ast_nodes": 112, + "n_identifiers": 13, + "d_id": 26549, + "documentation": { + "docstring": "Returns the path to a Bazel binary, downloading Bazel if not found. Also,\n checks Bazel's version is at least newer than 5.0.0\n\n A manual version check is needed only for really old bazel versions.\n Newer bazel releases perform their own version check against .bazelversion\n (see for details\n https://blog.bazel.build/2019/12/19/bazel-2.0.html#other-important-changes).\n ", + "n_words": 48, + "vocab_size": 41, + "n_whitespaces": 54, + "language": "en" + } + }, + { + "id": 244223, + "commit_id": "24f2fdb38481e6c013a588660c044e410148ce1e", + "repo": "mmdetection", + "path": "mmdet/utils/util_distribution.py", + "file_name": "util_distribution.py", + "fun_name": "build_ddp", + "commit_message": "fix lint (#7793)", + "code": "def build_ddp(model, device='cuda', *args, **kwargs):\n \n assert device in ['cuda', 'mlu'], 'Only available for cuda or mlu devices.'\n if device == 'cuda':\n model = model.cuda()\n elif device == 'mlu':\n from mmcv.device.mlu import MLUDistributedDataParallel\n ddp_factory['mlu'] = MLUDistributedDataParallel\n model = model.mlu()\n\n return ddp_factory[device](model, *args, **kwargs)\n\n", + "url": "https://github.com/open-mmlab/mmdetection.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 85, + "n_words": 42, + "vocab_size": 34, + "complexity": 3, + "nloc": 9, + "token_counts": 78, + "n_ast_nodes": 133, + "n_identifiers": 10, + "d_id": 70288, + "documentation": { + "docstring": "Build DistributedDataParallel module by device type.\n\n If device is cuda, return a MMDistributedDataParallel model;\n if device is mlu, return a MLUDistributedDataParallel model.\n\n Args:\n model (:class:`nn.Module`): module to be parallelized.\n device (str): device type, mlu or cuda.\n\n Returns:\n :class:`nn.Module`: the module to be parallelized\n\n References:\n .. 
[1] https://pytorch.org/docs/stable/generated/torch.nn.parallel.\n DistributedDataParallel.html\n ", + "n_words": 48, + "vocab_size": 37, + "n_whitespaces": 114, + "language": "en" + } + }, + { + "id": 241553, + "commit_id": "a610e043d797ca0bae1ce186829fece79077407a", + "repo": "lightning", + "path": "pytorch_lightning/utilities/enums.py", + "file_name": "enums.py", + "fun_name": "detect_current_mode", + "commit_message": "Add typing for utilities/enums.py (#11298)", + "code": "def detect_current_mode(cls) -> _FaultTolerantMode:\n \n env_value = os.getenv(\"PL_FAULT_TOLERANT_TRAINING\", \"0\").lower()\n # the int values are kept for backwards compatibility, but long-term we want to keep only the strings\n if env_value in (\"0\", \"disabled\"):\n return _FaultTolerantMode.DISABLED\n elif env_value in (\"1\", \"automatic\"):\n return _FaultTolerantMode.AUTOMATIC\n elif env_value in (\"2\", \"manual\"):\n return _FaultTolerantMode.MANUAL\n raise MisconfigurationException(\n \"The environment flag `PL_FAULT_TOLERANT_TRAINING` should be either 'disabled', 'automatic', or 'manual'.\"\n )\n", + "url": "https://github.com/Lightning-AI/lightning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 161, + "n_words": 61, + "vocab_size": 52, + "complexity": 4, + "nloc": 12, + "token_counts": 66, + "n_ast_nodes": 122, + "n_identifiers": 11, + "d_id": 69581, + "documentation": { + "docstring": "This classmethod detects if `Fault Tolerant` is activated and maps its value to `_FaultTolerantMode`.", + "n_words": 14, + "vocab_size": 14, + "n_whitespaces": 13, + "language": "en" + } + }, + { + "id": 203297, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/apps/config.py", + "file_name": "config.py", + "fun_name": "_path_from_module", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def _path_from_module(self, module):\n \n # See #21874 for extended discussion of the behavior of this method in\n # various cases.\n # Convert to list because __path__ may not support indexing.\n paths = list(getattr(module, \"__path__\", []))\n if len(paths) != 1:\n filename = getattr(module, \"__file__\", None)\n if filename is not None:\n paths = [os.path.dirname(filename)]\n else:\n # For unknown reasons, sometimes the list returned by __path__\n # contains duplicates that must be removed (#25246).\n paths = list(set(paths))\n if len(paths) > 1:\n raise ImproperlyConfigured(\n \"The app module %r has multiple filesystem locations (%r); \"\n \"you must configure this app with an AppConfig subclass \"\n \"with a 'path' class attribute.\" % (module, paths)\n )\n elif not paths:\n raise ImproperlyConfigured(\n \"The app module %r has no filesystem location, \"\n \"you must configure this app with an AppConfig subclass \"\n \"with a 'path' class attribute.\" % module\n )\n return paths[0]\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 432, + "n_words": 142, + "vocab_size": 91, + "complexity": 5, + "nloc": 21, + "token_counts": 108, + "n_ast_nodes": 191, + "n_identifiers": 13, + "d_id": 50287, + "documentation": { + "docstring": "Attempt to determine app's filesystem path from its module.", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 212222, + "commit_id": "c9751009161f092b2e403d8cccccf5252c0dce1a", + "repo": "bokeh", + "path": 
"bokeh/models/widgets/sliders.py", + "file_name": "sliders.py", + "fun_name": "value_as_datetime", + "commit_message": "Add DatetimeRangeSlider (#12034)\n\n* Add DatetimeRangeSlider\r\n\r\n* Add tests\r\n\r\n* Add docs", + "code": "def value_as_datetime(self) -> tp.Tuple[datetime, datetime] | None:\n \n if self.value is None:\n return None\n v1, v2 = self.value\n if isinstance(v1, numbers.Number):\n d1 = datetime.utcfromtimestamp(v1 / 1000)\n else:\n d1 = v1\n if isinstance(v2, numbers.Number):\n d2 = datetime.utcfromtimestamp(v2 / 1000)\n else:\n d2 = v2\n return d1, d2\n\n value = NonNullable(Tuple(Datetime, Datetime), help=)\n\n value_throttled = Readonly(NonNullable(Tuple(Datetime, Datetime)), help=)\n\n start = NonNullable(Datetime, help=)\n\n end = NonNullable(Datetime, help=)\n\n step = Int(default=3_600_000, help=)\n\n format = Override(default=\"%d %b %Y %H:%M:%S\")\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", + "url": "https://github.com/bokeh/bokeh.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 204, + "n_words": 81, + "vocab_size": 49, + "complexity": 4, + "nloc": 16, + "token_counts": 87, + "n_ast_nodes": 267, + "n_identifiers": 26, + "d_id": 53206, + "documentation": { + "docstring": " Convenience property to retrieve the value tuple as a tuple of\n datetime objects.\n \n Initial or selected range.\n \n Initial or selected value, throttled to report only on mouseup.\n \n The minimum allowable value.\n \n The maximum allowable value.\n \n The step between consecutive values, in units of milliseconds.\n Default is one hour.\n ", + "n_words": 48, + "vocab_size": 38, + "n_whitespaces": 101, + "language": "en" + } + }, + { + "id": 176595, + "commit_id": "8bea55e3071ed71eab4fb6650a45f0cdf5c911d4", + "repo": "networkx", + "path": "networkx/generators/spectral_graph_forge.py", + "file_name": "spectral_graph_forge.py", + "fun_name": "spectral_graph_forge", + "commit_message": "Remove `_mat_spect_approx` in favor of simpler procedure (#5624)\n\n* Replace _mat_spect_approx func internal usage.\r\n\r\n* Rm _mat_spect_approx helper function.", + "code": "def spectral_graph_forge(G, alpha, transformation=\"identity\", seed=None):\n \n import numpy as np\n import scipy as sp\n import scipy.stats # call as sp.stats\n\n available_transformations = [\"identity\", \"modularity\"]\n alpha = np.clip(alpha, 0, 1)\n A = nx.to_numpy_array(G)\n n = A.shape[1]\n level = int(round(n * alpha))\n\n if transformation not in available_transformations:\n msg = f\"{transformation!r} is not a valid transformation. 
\"\n msg += f\"Transformations: {available_transformations}\"\n raise nx.NetworkXError(msg)\n\n K = np.ones((1, n)) @ A\n\n B = A\n if transformation == \"modularity\":\n B -= K.T @ K / K.sum()\n\n # Compute low-rank approximation of B\n evals, evecs = np.linalg.eigh(B)\n k = np.argsort(np.abs(evals))[::-1] # indices of evals in descending order\n evecs[:, k[np.arange(level, n)]] = 0 # set smallest eigenvectors to 0\n B = evecs @ np.diag(evals) @ evecs.T\n\n if transformation == \"modularity\":\n B += K.T @ K / K.sum()\n\n B = np.clip(B, 0, 1)\n np.fill_diagonal(B, 0)\n\n for i in range(n - 1):\n B[i, i + 1 :] = sp.stats.bernoulli.rvs(B[i, i + 1 :], random_state=seed)\n B[i + 1 :, i] = np.transpose(B[i, i + 1 :])\n\n H = nx.from_numpy_array(B)\n\n return H\n", + "url": "https://github.com/networkx/networkx.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 293, + "n_words": 169, + "vocab_size": 105, + "complexity": 5, + "nloc": 30, + "token_counts": 306, + "n_ast_nodes": 494, + "n_identifiers": 45, + "d_id": 41994, + "documentation": { + "docstring": "Returns a random simple graph with spectrum resembling that of `G`\n\n This algorithm, called Spectral Graph Forge (SGF), computes the\n eigenvectors of a given graph adjacency matrix, filters them and\n builds a random graph with a similar eigenstructure.\n SGF has been proved to be particularly useful for synthesizing\n realistic social networks and it can also be used to anonymize\n graph sensitive data.\n\n Parameters\n ----------\n G : Graph\n alpha : float\n Ratio representing the percentage of eigenvectors of G to consider,\n values in [0,1].\n transformation : string, optional\n Represents the intended matrix linear transformation, possible values\n are 'identity' and 'modularity'\n seed : integer, random_state, or None (default)\n Indicator of numpy random number generation state.\n See :ref:`Randomness`.\n\n Returns\n -------\n H : Graph\n A graph with a similar eigenvector structure of the input one.\n\n Raises\n ------\n NetworkXError\n If transformation has a value different from 'identity' or 'modularity'\n\n Notes\n -----\n Spectral Graph Forge (SGF) generates a random simple graph resembling the\n global properties of the given one.\n It leverages the low-rank approximation of the associated adjacency matrix\n driven by the *alpha* precision parameter.\n SGF preserves the number of nodes of the input graph and their ordering.\n This way, nodes of output graphs resemble the properties of the input one\n and attributes can be directly mapped.\n\n It considers the graph adjacency matrices which can optionally be\n transformed to other symmetric real matrices (currently transformation\n options include *identity* and *modularity*).\n The *modularity* transformation, in the sense of Newman's modularity matrix\n allows the focusing on community structure related properties of the graph.\n\n SGF applies a low-rank approximation whose fixed rank is computed from the\n ratio *alpha* of the input graph adjacency matrix dimension.\n This step performs a filtering on the input eigenvectors similar to the low\n pass filtering common in telecommunications.\n\n The filtered values (after truncation) are used as input to a Bernoulli\n sampling for constructing a random adjacency matrix.\n\n References\n ----------\n .. [1] L. Baldesi, C. T. Butts, A. 
Markopoulou, \"Spectral Graph Forge:\n Graph Generation Targeting Modularity\", IEEE Infocom, '18.\n https://arxiv.org/abs/1801.01715\n .. [2] M. Newman, \"Networks: an introduction\", Oxford university press,\n 2010\n\n Examples\n --------\n >>> G = nx.karate_club_graph()\n >>> H = nx.spectral_graph_forge(G, 0.3)\n >>>\n ", + "n_words": 358, + "vocab_size": 213, + "n_whitespaces": 582, + "language": "en" + } + }, + { + "id": 218101, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/importlib/_bootstrap_external.py", + "file_name": "_bootstrap_external.py", + "fun_name": "find_loader", + "commit_message": "add python 3.10.4 for windows", + "code": "def find_loader(self, fullname):\n \n _warnings.warn(\"FileFinder.find_loader() is deprecated and \"\n \"slated for removal in Python 3.12; use find_spec() instead\",\n DeprecationWarning)\n spec = self.find_spec(fullname)\n if spec is None:\n return None, []\n return spec.loader, spec.submodule_search_locations or []\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 123, + "n_words": 33, + "vocab_size": 29, + "complexity": 3, + "nloc": 8, + "token_counts": 46, + "n_ast_nodes": 79, + "n_identifiers": 10, + "d_id": 55131, + "documentation": { + "docstring": "Try to find a loader for the specified module, or the namespace\n package portions. Returns (loader, list-of-portions).\n\n This method is deprecated. Use find_spec() instead.\n\n ", + "n_words": 24, + "vocab_size": 23, + "n_whitespaces": 46, + "language": "en" + } + }, + { + "id": 19671, + "commit_id": "9a3b3ce70621af6f9adaa9eeac9cf83fa149319c", + "repo": "pipenv", + "path": "pipenv/environment.py", + "file_name": "environment.py", + "fun_name": "sys_path", + "commit_message": "Issue 4993 Add standard pre commit hooks and apply linting. (#4994)\n\n* Add .pre-commit-config.yaml to the project and exclude tests (for now). 
This does not include the MyPy linting that pip does but does include everything else.", + "code": "def sys_path(self):\n # type: () -> List[str]\n \n\n from .vendor.vistir.compat import JSONDecodeError\n\n current_executable = Path(sys.executable).as_posix()\n if not self.python or self.python == current_executable:\n return sys.path\n elif any([sys.prefix == self.prefix, not self.is_venv]):\n return sys.path\n cmd_args = [self.python, \"-c\", \"import json, sys; print(json.dumps(sys.path))\"]\n path, _ = vistir.misc.run(\n cmd_args,\n return_object=False,\n nospin=True,\n block=True,\n combine_stderr=False,\n write_to_stdout=False,\n )\n try:\n path = json.loads(path.strip())\n except JSONDecodeError:\n path = sys.path\n return path\n", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 255, + "n_words": 61, + "vocab_size": 48, + "complexity": 5, + "nloc": 21, + "token_counts": 134, + "n_ast_nodes": 212, + "n_identifiers": 28, + "d_id": 3055, + "documentation": { + "docstring": "\n The system path inside the environment\n\n :return: The :data:`sys.path` from the environment\n :rtype: list\n ", + "n_words": 14, + "vocab_size": 11, + "n_whitespaces": 43, + "language": "en" + } + }, + { + "id": 104228, + "commit_id": "6ed6ac9448311930557810383d2cfd4fe6aae269", + "repo": "datasets", + "path": "src/datasets/arrow_writer.py", + "file_name": "arrow_writer.py", + "fun_name": "parquet_to_arrow", + "commit_message": "Better TQDM output (#3654)\n\n* Show progress bar when generating examples\r\n\r\n* Consistent utils.is_progress_bar_enabled calls\r\n\r\n* Fix tqdm in notebook\r\n\r\n* Add missing params to DatasetDict.map\r\n\r\n* Specify total in tqdm progress bar in map\r\n\r\n* Fix total computation\r\n\r\n* Small fix\r\n\r\n* Add desc to map_nested\r\n\r\n* Add more precise descriptions to download\r\n\r\n* Address comments\r\n\r\n* Fix docstring\r\n\r\n* Final changes\r\n\r\n* Minor change", + "code": "def parquet_to_arrow(sources, destination):\n \n stream = None if isinstance(destination, str) else destination\n disable = not utils.is_progress_bar_enabled()\n with ArrowWriter(path=destination, stream=stream) as writer:\n for source in utils.tqdm(sources, unit=\"sources\", disable=disable):\n pf = pa.parquet.ParquetFile(source)\n for i in utils.tqdm(range(pf.num_row_groups), unit=\"row_groups\", leave=False, disable=disable):\n df = pf.read_row_group(i).to_pandas()\n for col in df.columns:\n df[col] = df[col].apply(json.loads)\n reconstructed_table = pa.Table.from_pandas(df)\n writer.write_table(reconstructed_table)\n return destination\n", + "url": "https://github.com/huggingface/datasets.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 175, + "n_words": 52, + "vocab_size": 41, + "complexity": 5, + "nloc": 13, + "token_counts": 150, + "n_ast_nodes": 236, + "n_identifiers": 35, + "d_id": 21787, + "documentation": { + "docstring": "Convert parquet files to arrow file. 
Inputs can be str paths or file-like objects", + "n_words": 14, + "vocab_size": 14, + "n_whitespaces": 13, + "language": "en" + } + }, + { + "id": 185240, + "commit_id": "e61eaf7597a1050e80e7ce029737b5544743a2f5", + "repo": "textual", + "path": "src/textual/widgets/_input.py", + "file_name": "_input.py", + "fun_name": "cursor_width", + "commit_message": "replace TextInput with Input", + "code": "def cursor_width(self) -> int:\n \n if self.placeholder and not self.value:\n return cell_len(self.placeholder)\n return self._position_to_cell(len(self.value)) + 1\n", + "url": "https://github.com/Textualize/textual.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 47, + "n_words": 15, + "vocab_size": 14, + "complexity": 3, + "nloc": 5, + "token_counts": 39, + "n_ast_nodes": 65, + "n_identifiers": 8, + "d_id": 44947, + "documentation": { + "docstring": "Get the width of the input (with extra space for cursor at the end).", + "n_words": 14, + "vocab_size": 12, + "n_whitespaces": 13, + "language": "en" + } + }, + { + "id": 247719, + "commit_id": "96274565ff0dbb7d21b02b04fcef115330426707", + "repo": "synapse", + "path": "tests/rest/client/test_relations.py", + "file_name": "test_relations.py", + "fun_name": "test_bundled_aggregations_with_filter", + "commit_message": "Fix bundling aggregations if unsigned is not a returned event field. (#12234)\n\nAn error occured if a filter was supplied with `event_fields` which did not include\r\n`unsigned`.\r\n\r\nIn that case, bundled aggregations are still added as the spec states it is allowed\r\nfor servers to add additional fields.", + "code": "def test_bundled_aggregations_with_filter(self) -> None:\n \n self._send_relation(RelationTypes.ANNOTATION, \"m.reaction\", \"a\")\n\n # Note that the sync filter does not include \"unsigned\" as a field.\n filter = urllib.parse.quote_plus(\n b'{\"event_fields\": [\"content\", \"event_id\"], \"room\": {\"timeline\": {\"limit\": 3}}}'\n )\n channel = self.make_request(\n \"GET\", f\"/sync?filter={filter}\", access_token=self.user_token\n )\n self.assertEqual(200, channel.code, channel.json_body)\n\n # Ensure the timeline is limited, find the parent event.\n room_timeline = channel.json_body[\"rooms\"][\"join\"][self.room][\"timeline\"]\n self.assertTrue(room_timeline[\"limited\"])\n parent_event = self._find_event_in_chunk(room_timeline[\"events\"])\n\n # Ensure there's bundled aggregations on it.\n self.assertIn(\"unsigned\", parent_event)\n self.assertIn(\"m.relations\", parent_event[\"unsigned\"])\n\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 196, + "n_words": 69, + "vocab_size": 59, + "complexity": 1, + "nloc": 21, + "token_counts": 120, + "n_ast_nodes": 211, + "n_identifiers": 22, + "d_id": 71871, + "documentation": { + "docstring": "\n If \"unsigned\" is an omitted field (due to filtering), adding the bundled\n aggregations should not break.\n\n Note that the spec allows for a server to return additional fields beyond\n what is specified.\n ", + "n_words": 32, + "vocab_size": 29, + "n_whitespaces": 68, + "language": "en" + } + }, + { + "id": 23176, + "commit_id": "9f62b610dea6161627200ed85d92e19b1923279a", + "repo": "PaddleOCR", + "path": "ppocr/data/imaug/fce_aug.py", + "file_name": "fce_aug.py", + "fun_name": "sample_crop_box", + "commit_message": "add fcenet", + "code": "def sample_crop_box(self, img_size, results):\n \n\n assert 
isinstance(img_size, tuple)\n h, w = img_size[:2]\n\n key_masks = results['polys']\n\n x_valid_array = np.ones(w, dtype=np.int32)\n y_valid_array = np.ones(h, dtype=np.int32)\n\n selected_mask = key_masks[np.random.randint(0, len(key_masks))]\n selected_mask = selected_mask.reshape((-1, 2)).astype(np.int32)\n max_x_start = max(np.min(selected_mask[:, 0]) - 2, 0)\n min_x_end = min(np.max(selected_mask[:, 0]) + 3, w - 1)\n max_y_start = max(np.min(selected_mask[:, 1]) - 2, 0)\n min_y_end = min(np.max(selected_mask[:, 1]) + 3, h - 1)\n\n # for key in results.get('mask_fields', []):\n # if len(results[key].masks) == 0:\n # continue\n # masks = results[key].masks\n for mask in key_masks:\n # assert len(mask) == 1\n mask = mask.reshape((-1, 2)).astype(np.int32)\n clip_x = np.clip(mask[:, 0], 0, w - 1)\n clip_y = np.clip(mask[:, 1], 0, h - 1)\n min_x, max_x = np.min(clip_x), np.max(clip_x)\n min_y, max_y = np.min(clip_y), np.max(clip_y)\n\n x_valid_array[min_x - 2:max_x + 3] = 0\n y_valid_array[min_y - 2:max_y + 3] = 0\n\n min_w = int(w * self.min_side_ratio)\n min_h = int(h * self.min_side_ratio)\n\n x1, x2 = self.sample_valid_start_end(x_valid_array, min_w, max_x_start,\n min_x_end)\n y1, y2 = self.sample_valid_start_end(y_valid_array, min_h, max_y_start,\n min_y_end)\n\n return np.array([x1, y1, x2, y2])\n", + "url": "https://github.com/PaddlePaddle/PaddleOCR.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 507, + "n_words": 161, + "vocab_size": 98, + "complexity": 2, + "nloc": 27, + "token_counts": 370, + "n_ast_nodes": 561, + "n_identifiers": 45, + "d_id": 4526, + "documentation": { + "docstring": "Generate crop box and make sure not to crop the polygon instances.\n\n Args:\n img_size (tuple(int)): The image size (h, w).\n results (dict): The results dict.\n ", + "n_words": 25, + "vocab_size": 22, + "n_whitespaces": 61, + "language": "en" + } + }, + { + "id": 167144, + "commit_id": "25749d29dbbb8c6ae7a05f4661948d03c17b20ae", + "repo": "pandas", + "path": "pandas/core/arrays/period.py", + "file_name": "period.py", + "fun_name": "dt64arr_to_periodarr", + "commit_message": "ENH: DTA.to_period support non-nano (#47324)\n\n* ENH: DTA.to_period support non-nano\r\n\r\n* update test", + "code": "def dt64arr_to_periodarr(data, freq, tz=None):\n \n if not isinstance(data.dtype, np.dtype) or data.dtype.kind != \"M\":\n raise ValueError(f\"Wrong dtype: {data.dtype}\")\n\n if freq is None:\n if isinstance(data, ABCIndex):\n data, freq = data._values, data.freq\n elif isinstance(data, ABCSeries):\n data, freq = data._values, data.dt.freq\n\n elif isinstance(data, (ABCIndex, ABCSeries)):\n data = data._values\n\n reso = get_unit_from_dtype(data.dtype)\n freq = Period._maybe_convert_freq(freq)\n base = freq._period_dtype_code\n return c_dt64arr_to_periodarr(data.view(\"i8\"), base, tz, reso=reso), freq\n\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 132, + "n_words": 58, + "vocab_size": 42, + "complexity": 7, + "nloc": 14, + "token_counts": 142, + "n_ast_nodes": 229, + "n_identifiers": 21, + "d_id": 39943, + "documentation": { + "docstring": "\n Convert an datetime-like array to values Period ordinals.\n\n Parameters\n ----------\n data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]\n freq : Optional[Union[str, Tick]]\n Must match the `freq` on the `data` if `data` is a DatetimeIndex\n or Series.\n tz : 
Optional[tzinfo]\n\n Returns\n -------\n ordinals : ndarray[int64]\n freq : Tick\n The frequency extracted from the Series or DatetimeIndex if that's\n used.\n\n ", + "n_words": 55, + "vocab_size": 44, + "n_whitespaces": 117, + "language": "en" + } + }, + { + "id": 36650, + "commit_id": "5b40a37bc4da9dc6cd33876ce9bb3f7f48450a03", + "repo": "transformers", + "path": "src/transformers/models/vit_mae/modeling_tf_vit_mae.py", + "file_name": "modeling_tf_vit_mae.py", + "fun_name": "get_2d_sincos_pos_embed", + "commit_message": "Add TF ViT MAE (#16255)\n\n* ported TFViTMAEIntermediate and TFViTMAEOutput.\r\n\r\n* added TFViTMAEModel and TFViTMAEDecoder.\r\n\r\n* feat: added a noise argument in the implementation for reproducibility.\r\n\r\n* feat: vit mae models with an additional noise argument for reproducibility.\r\n\r\nCo-authored-by: ariG23498 \r\nCo-authored-by: ydshieh ", + "code": "def get_2d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False):\n \n grid_h = tf.range(grid_size, dtype=tf.float32)\n grid_w = tf.range(grid_size, dtype=tf.float32)\n grid = tf.meshgrid(grid_w, grid_h) # here w goes first\n grid = tf.stack(grid, axis=0)\n\n grid = tf.reshape(grid, [2, 1, grid_size, grid_size])\n pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)\n if add_cls_token:\n pos_embed = tf.concat([tf.zeros((1, embed_dim)), pos_embed], axis=0)\n return pos_embed\n\n", + "url": "https://github.com/huggingface/transformers.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 81, + "n_words": 46, + "vocab_size": 32, + "complexity": 2, + "nloc": 10, + "token_counts": 118, + "n_ast_nodes": 176, + "n_identifiers": 19, + "d_id": 6659, + "documentation": { + "docstring": "\n Create 2D sin/cos positional embeddings.\n\n Args:\n embed_dim (`int`):\n Embedding dimension.\n grid_size (`int`):\n The grid height and width.\n add_cls_token (`bool`, *optional*, defaults to `False`):\n Whether or not to add a classification (CLS) token.\n\n Returns:\n (`tf.Tensor` of shape (grid_size*grid_size, embed_dim) or (1+grid_size*grid_size, embed_dim): the position\n embeddings (with or without classification token)\n ", + "n_words": 49, + "vocab_size": 44, + "n_whitespaces": 130, + "language": "en" + } + }, + { + "id": 153819, + "commit_id": "57e29bc5d82348006c5170ef9ac0a9eedcd9acf9", + "repo": "modin", + "path": "modin/core/storage_formats/base/query_compiler.py", + "file_name": "query_compiler.py", + "fun_name": "df_update", + "commit_message": "REFACTOR-#4513: Fix spelling mistakes in docs and docstrings (#4514)\n\nCo-authored-by: Rehan Sohail Durrani \r\nSigned-off-by: jeffreykennethli ", + "code": "def df_update(self, other, **kwargs): # noqa: PR02\n \n return BinaryDefault.register(pandas.DataFrame.update, inplace=True)(\n self, other=other, **kwargs\n )\n", + "url": "https://github.com/modin-project/modin.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 47, + "n_words": 14, + "vocab_size": 14, + "complexity": 1, + "nloc": 4, + "token_counts": 36, + "n_ast_nodes": 56, + "n_identifiers": 10, + "d_id": 35634, + "documentation": { + "docstring": "\n Update values of `self` using non-NA values of `other` at the corresponding positions.\n\n If axes are not equal, perform frames alignment first.\n\n Parameters\n ----------\n other : BaseQueryCompiler\n Frame to grab replacement values from.\n join : {\"left\"}\n Specify type of join to align frames if axes are not equal\n (note: currently 
only one type of join is implemented).\n overwrite : bool\n Whether to overwrite every corresponding value of self, or only if it's NAN.\n filter_func : callable(pandas.Series, pandas.Series) -> numpy.ndarray\n Function that takes column of the self and return bool mask for values, that\n should be overwritten in the self frame.\n errors : {\"raise\", \"ignore\"}\n If \"raise\", will raise a ``ValueError`` if `self` and `other` both contain\n non-NA data in the same place.\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with updated values.\n ", + "n_words": 142, + "vocab_size": 100, + "n_whitespaces": 351, + "language": "en" + } + }, + { + "id": 167007, + "commit_id": "3350f95c017a68644e8577651af743413532356f", + "repo": "pandas", + "path": "pandas/core/arrays/timedeltas.py", + "file_name": "timedeltas.py", + "fun_name": "total_seconds", + "commit_message": "REF: share DTA/TDA/PA arithmetic methods (#47205)\n\n* REF: share DTA/TDA/PA arithmetic methods\r\n\r\n* troubleshoot npdev build", + "code": "def total_seconds(self) -> npt.NDArray[np.float64]:\n \n return self._maybe_mask_results(1e-9 * self.asi8, fill_value=None)\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 23, + "n_words": 9, + "vocab_size": 9, + "complexity": 1, + "nloc": 56, + "token_counts": 32, + "n_ast_nodes": 48, + "n_identifiers": 9, + "d_id": 39917, + "documentation": { + "docstring": "\n Return total duration of each element expressed in seconds.\n\n This method is available directly on TimedeltaArray, TimedeltaIndex\n and on Series containing timedelta values under the ``.dt`` namespace.\n\n Returns\n -------\n seconds : [ndarray, Float64Index, Series]\n When the calling object is a TimedeltaArray, the return type\n is ndarray. When the calling object is a TimedeltaIndex,\n the return type is a Float64Index. 
When the calling object\n is a Series, the return type is Series of type `float64` whose\n index is the same as the original.\n\n See Also\n --------\n datetime.timedelta.total_seconds : Standard library version\n of this method.\n TimedeltaIndex.components : Return a DataFrame with components of\n each Timedelta.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))\n >>> s\n 0 0 days\n 1 1 days\n 2 2 days\n 3 3 days\n 4 4 days\n dtype: timedelta64[ns]\n\n >>> s.dt.total_seconds()\n 0 0.0\n 1 86400.0\n 2 172800.0\n 3 259200.0\n 4 345600.0\n dtype: float64\n\n **TimedeltaIndex**\n\n >>> idx = pd.to_timedelta(np.arange(5), unit='d')\n >>> idx\n TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq=None)\n\n >>> idx.total_seconds()\n Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0],\n dtype='float64')\n ", + "n_words": 172, + "vocab_size": 105, + "n_whitespaces": 569, + "language": "en" + } + }, + { + "id": 271853, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/engine/training_utils_v1.py", + "file_name": "training_utils_v1.py", + "fun_name": "batch_shuffle", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def batch_shuffle(index_array, batch_size):\n \n batch_count = int(len(index_array) / batch_size)\n # to reshape we need to be cleanly divisible by batch size\n # we stash extra items and reappend them after shuffling\n last_batch = index_array[batch_count * batch_size :]\n index_array = index_array[: batch_count * batch_size]\n index_array = index_array.reshape((batch_count, batch_size))\n np.random.shuffle(index_array)\n index_array = index_array.flatten()\n return np.append(index_array, last_batch)\n\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 83, + "n_words": 53, + "vocab_size": 42, + "complexity": 1, + "nloc": 8, + "token_counts": 73, + "n_ast_nodes": 119, + "n_identifiers": 13, + "d_id": 80870, + "documentation": { + "docstring": "Shuffles an array in a batch-wise fashion.\n\n Useful for shuffling HDF5 arrays\n (where one cannot access arbitrary indices).\n\n Args:\n index_array: array of indices to be shuffled.\n batch_size: integer.\n\n Returns:\n The `index_array` array, shuffled in a batch-wise fashion.\n ", + "n_words": 37, + "vocab_size": 32, + "n_whitespaces": 73, + "language": "en" + } + }, + { + "id": 196246, + "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", + "repo": "sympy", + "path": "sympy/functions/elementary/piecewise.py", + "file_name": "piecewise.py", + "fun_name": "_intervals", + "commit_message": "Updated import locations", + "code": "def _intervals(self, sym):\n \n from sympy.solvers.inequalities import _solve_inequality\n\n assert isinstance(self, Piecewise)\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 7, + "n_whitespaces": 31, + "n_words": 10, + "vocab_size": 10, + "complexity": 31, + "nloc": 82, + "token_counts": 577, + "n_ast_nodes": 36, + "n_identifiers": 9, + "d_id": 47746, + "documentation": { + "docstring": "Return a list of unique tuples, (a, b, e, i), where a and b\n are the lower and upper bounds in which the expression e of\n argument i in self is defined and $a < b$ (when involving\n numbers) or $a \\le b$ when involving symbols.\n\n 
If there are any relationals not involving sym, or any\n relational cannot be solved for sym, NotImplementedError is\n raised. The calling routine should have removed such\n relationals before calling this routine.\n\n The evaluated conditions will be returned as ranges.\n Discontinuous ranges will be returned separately with\n identical expressions. The first condition that evaluates to\n True will be returned as the last tuple with a, b = -oo, oo.\n ", + "n_words": 114, + "vocab_size": 84, + "n_whitespaces": 198, + "language": "en" + } + }, + { + "id": 268879, + "commit_id": "b4dca51d0558e788f62a96d1009a07f773a202f4", + "repo": "keras", + "path": "keras/metrics/__init__.py", + "file_name": "__init__.py", + "fun_name": "get", + "commit_message": "Refactor disparate metrics-related files into a single metrics folder.\n\nFurther work may be needed to split up the long file with individual metric definitions. However having a single file per metric may be too granular. TBD.\n\nPiperOrigin-RevId: 425248502", + "code": "def get(identifier):\n \n if isinstance(identifier, dict):\n return deserialize(identifier)\n elif isinstance(identifier, str):\n return deserialize(str(identifier))\n elif callable(identifier):\n return identifier\n else:\n raise ValueError(\n f'Could not interpret metric identifier: {identifier}')\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 49, + "n_words": 25, + "vocab_size": 21, + "complexity": 4, + "nloc": 10, + "token_counts": 51, + "n_ast_nodes": 89, + "n_identifiers": 8, + "d_id": 79746, + "documentation": { + "docstring": "Retrieves a Keras metric as a `function`/`Metric` class instance.\n\n The `identifier` may be the string name of a metric function or class.\n\n >>> metric = tf.keras.metrics.get(\"categorical_crossentropy\")\n >>> type(metric)\n \n >>> metric = tf.keras.metrics.get(\"CategoricalCrossentropy\")\n >>> type(metric)\n \n\n You can also specify `config` of the metric to this function by passing dict\n containing `class_name` and `config` as an identifier. Also note that the\n `class_name` must map to a `Metric` class\n\n >>> identifier = {\"class_name\": \"CategoricalCrossentropy\",\n ... \"config\": {\"from_logits\": True}}\n >>> metric = tf.keras.metrics.get(identifier)\n >>> type(metric)\n \n\n Args:\n identifier: A metric identifier. 
One of None or string name of a metric\n function/class or metric configuration dictionary or a metric function or\n a metric class instance\n\n Returns:\n A Keras metric as a `function`/ `Metric` class instance.\n\n Raises:\n ValueError: If `identifier` cannot be interpreted.\n ", + "n_words": 132, + "vocab_size": 72, + "n_whitespaces": 184, + "language": "en" + } + }, + { + "id": 223513, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/email/_header_value_parser.py", + "file_name": "_header_value_parser.py", + "fun_name": "get_attrtext", + "commit_message": "add python 3.10.4 for windows", + "code": "def get_attrtext(value):\n \n m = _non_attribute_end_matcher(value)\n if not m:\n raise errors.HeaderParseError(\n \"expected attrtext but found {!r}\".format(value))\n attrtext = m.group()\n value = value[len(attrtext):]\n attrtext = ValueTerminal(attrtext, 'attrtext')\n _validate_xtext(attrtext)\n return attrtext, value\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 71, + "n_words": 29, + "vocab_size": 23, + "complexity": 2, + "nloc": 10, + "token_counts": 61, + "n_ast_nodes": 106, + "n_identifiers": 12, + "d_id": 56942, + "documentation": { + "docstring": "attrtext = 1*(any non-ATTRIBUTE_ENDS character)\n\n We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the\n token's defects list if we find non-attrtext characters. We also register\n defects for *any* non-printables even though the RFC doesn't exclude all of\n them, because we follow the spirit of RFC 5322.\n\n ", + "n_words": 48, + "vocab_size": 39, + "n_whitespaces": 64, + "language": "en" + } + }, + { + "id": 157120, + "commit_id": "c4d35f5515191409913827fd4faa3b69a3d7399a", + "repo": "dask", + "path": "dask/dataframe/io/io.py", + "file_name": "io.py", + "fun_name": "from_dict", + "commit_message": "Backend library dispatching for IO in Dask-Array and Dask-DataFrame (#9475)", + "code": "def from_dict(data, npartitions, orient=\"columns\", dtype=None, columns=None):\n \n\n collection_types = {type(v) for v in data.values() if is_dask_collection(v)}\n if collection_types:\n raise NotImplementedError(\n \"from_dict doesn't currently support Dask collections as inputs. \"\n f\"Objects of type {collection_types} were given in the input dict.\"\n )\n\n return from_pandas(\n pd.DataFrame.from_dict(data, orient, dtype, columns),\n npartitions,\n )\n\n", + "url": "https://github.com/dask/dask.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 112, + "n_words": 47, + "vocab_size": 43, + "complexity": 4, + "nloc": 11, + "token_counts": 72, + "n_ast_nodes": 116, + "n_identifiers": 15, + "d_id": 36860, + "documentation": { + "docstring": "\n Construct a Dask DataFrame from a Python Dictionary\n\n Parameters\n ----------\n data : dict\n Of the form {field : array-like} or {field : dict}.\n npartitions : int\n The number of partitions of the index to create. Note that depending on\n the size and index of the dataframe, the output may have fewer\n partitions than requested.\n orient : {'columns', 'index', 'tight'}, default 'columns'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the columns of the resulting DataFrame, pass 'columns'\n (default). 
Otherwise if the keys should be rows, pass 'index'.\n If 'tight', assume a dict with keys\n ['index', 'columns', 'data', 'index_names', 'column_names'].\n dtype: bool\n Data type to force, otherwise infer.\n columns: string, optional\n Column labels to use when ``orient='index'``. Raises a ValueError\n if used with ``orient='columns'`` or ``orient='tight'``.\n\n Examples\n --------\n >>> import dask.dataframe as dd\n >>> ddf = dd.from_dict({\"num1\": [1, 2, 3, 4], \"num2\": [7, 8, 9, 10]}, npartitions=2)\n ", + "n_words": 152, + "vocab_size": 111, + "n_whitespaces": 276, + "language": "en" + } + }, + { + "id": 63195, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py", + "file_name": "__init__.py", + "fun_name": "_is_unpacked_egg", + "commit_message": "upd; format", + "code": "def _is_unpacked_egg(path):\n \n return (\n _is_egg_path(path) and\n os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))\n )\n\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 33, + "n_words": 10, + "vocab_size": 10, + "complexity": 2, + "nloc": 5, + "token_counts": 33, + "n_ast_nodes": 57, + "n_identifiers": 6, + "d_id": 13197, + "documentation": { + "docstring": "\n Determine if given path appears to be an unpacked egg.\n ", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 17, + "language": "en" + } + }, + { + "id": 206404, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/test/runner.py", + "file_name": "runner.py", + "fun_name": "shuffle", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def shuffle(self, items, key):\n \n hashes = {}\n for item in items:\n hashed = self._hash_item(item, key)\n if hashed in hashes:\n msg = \"item {!r} has same hash {!r} as item {!r}\".format(\n item,\n hashed,\n hashes[hashed],\n )\n raise RuntimeError(msg)\n hashes[hashed] = item\n return [hashes[hashed] for hashed in sorted(hashes)]\n\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 208, + "n_words": 45, + "vocab_size": 34, + "complexity": 4, + "nloc": 13, + "token_counts": 75, + "n_ast_nodes": 115, + "n_identifiers": 12, + "d_id": 51514, + "documentation": { + "docstring": "\n Return a new list of the items in a shuffled order.\n\n The `key` is a function that accepts an item in `items` and returns\n a string unique for that item that can be viewed as a string id. The\n order of the return value is deterministic. 
It depends on the seed\n and key function but not on the original order.\n ", + "n_words": 60, + "vocab_size": 41, + "n_whitespaces": 103, + "language": "en" + } + }, + { + "id": 260503, + "commit_id": "01fcf8a0acc7e6517faa4fc6887eb45f5d2ea77b", + "repo": "scikit-learn", + "path": "sklearn/decomposition/tests/test_sparse_pca.py", + "file_name": "test_sparse_pca.py", + "fun_name": "test_transform_inverse_transform_round_trip", + "commit_message": "ENH add inverse_transform in *SparsePCA (#23905)", + "code": "def test_transform_inverse_transform_round_trip(SPCA):\n \n rng = np.random.RandomState(0)\n n_samples, n_features = 10, 5\n X = rng.randn(n_samples, n_features)\n\n n_components = n_features\n spca = SPCA(\n n_components=n_components, alpha=1e-12, ridge_alpha=1e-12, random_state=0\n )\n X_trans_spca = spca.fit_transform(X)\n assert_allclose(spca.inverse_transform(X_trans_spca), X)\n", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 64, + "n_words": 30, + "vocab_size": 24, + "complexity": 1, + "nloc": 10, + "token_counts": 79, + "n_ast_nodes": 118, + "n_identifiers": 19, + "d_id": 76298, + "documentation": { + "docstring": "Check the `transform` and `inverse_transform` round trip with no loss of\n information.\n ", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 18, + "language": "en" + } + }, + { + "id": 195437, + "commit_id": "0f129e9c38b6b10d80982ecc412785db62842938", + "repo": "ParlAI", + "path": "parlai/tasks/reasoning/reason_types/step_by_step.py", + "file_name": "step_by_step.py", + "fun_name": "extract_operations", + "commit_message": "ROSCOE suite of metrics (#4839)\n\n* ROSCOE suite of metrics\r\n\r\n* updating tests\r\n\r\n* lint\r\n\r\n* fixing protobuf version to stop cleaninstall failures\r\n\r\n* updating requirements\r\n\r\n* convert to absolute path\r\n\r\n* moving tests because of the dependency issues\r\n\r\n* adding new dependencies in tests\r\n\r\n* add test dependencies\r\n\r\n* fixing deps\r\n\r\n* updating task list\r\n\r\n* checklist deps can't be installed on circleci\r\n\r\n* actually fix protobuf version\r\n\r\n* protobuf range\r\n\r\n* protobuf conflict with google-api-core\r\n\r\n* return tests\r\n\r\n* convert imports to absolute path\r\n\r\n* trying checklist again\r\n\r\n* trying to avoid checklist failures\r\n\r\n* checklist to teacher tests\r\n\r\n* add user option to avoid installation failure\r\n\r\n* jupiter as well\r\n\r\n* typo\r\n\r\n* moving into virtual env setup\r\n\r\n* user param not allowed in virtual env\r\n\r\n* move spacy to circleCI because it's big\r\n\r\n* replace local model with HF\r\n\r\n* fixes based on comments\r\n\r\n* remove unused nli scores, fix tests\r\n\r\n* Added path to BART model\r\n\r\nCo-authored-by: Spencer Poff ", + "code": "def extract_operations(self) -> List[str]:\n \n if not self.step:\n return []\n try:\n operations = re.findall(r'[-+*^/]', self.step)\n except TypeError as e:\n print(f\"TYPE: {type(self.step)}\")\n print(f\"STEP: {self.step}\")\n raise e\n return operations\n", + "url": "https://github.com/facebookresearch/ParlAI.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 116, + "n_words": 26, + "vocab_size": 24, + "complexity": 3, + "nloc": 13, + "token_counts": 54, + "n_ast_nodes": 111, + "n_identifiers": 12, + "d_id": 47260, + "documentation": { + "docstring": "\n Finds all instances of the math operations: -, +, *, ^, / 
in the step.\n ", + "n_words": 15, + "vocab_size": 14, + "n_whitespaces": 30, + "language": "en" + } + }, + { + "id": 269604, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/backend.py", + "file_name": "backend.py", + "fun_name": "less_equal", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def less_equal(x, y):\n \n return tf.less_equal(x, y)\n\n\n@keras_export(\"keras.backend.maximum\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "@keras_export(\"keras.backend.maximum\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", + "n_ast_errors": 1, + "ast_levels": 7, + "n_whitespaces": 12, + "n_words": 9, + "vocab_size": 9, + "complexity": 1, + "nloc": 2, + "token_counts": 17, + "n_ast_nodes": 57, + "n_identifiers": 10, + "d_id": 80224, + "documentation": { + "docstring": "Element-wise truth value of (x <= y).\n\n Args:\n x: Tensor or variable.\n y: Tensor or variable.\n\n Returns:\n A bool tensor.\n ", + "n_words": 20, + "vocab_size": 17, + "n_whitespaces": 50, + "language": "en" + } + }, + { + "id": 65410, + "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", + "repo": "erpnext", + "path": "erpnext/accounts/utils.py", + "file_name": "utils.py", + "fun_name": "update_reference_in_journal_entry", + "commit_message": "style: format code with black", + "code": "def update_reference_in_journal_entry(d, journal_entry, do_not_save=False):\n\t\n\tjv_detail = journal_entry.get(\"accounts\", {\"name\": d[\"voucher_detail_no\"]})[0]\n\n\tif flt(d[\"unadjusted_amount\"]) - flt(d[\"allocated_amount\"]) != 0:\n\t\t# adjust the unreconciled balance\n\t\tamount_in_account_currency = flt(d[\"unadjusted_amount\"]) - flt(d[\"allocated_amount\"])\n\t\tamount_in_company_currency = amount_in_account_currency * flt(jv_detail.exchange_rate)\n\t\tjv_detail.set(d[\"dr_or_cr\"], amount_in_account_currency)\n\t\tjv_detail.set(\n\t\t\t\"debit\" if d[\"dr_or_cr\"] == \"debit_in_account_currency\" else \"credit\",\n\t\t\tamount_in_company_currency,\n\t\t)\n\telse:\n\t\tjournal_entry.remove(jv_detail)\n\n\t# new row with references\n\tnew_row = journal_entry.append(\"accounts\")\n\n\tnew_row.update((frappe.copy_doc(jv_detail)).as_dict())\n\n\tnew_row.set(d[\"dr_or_cr\"], d[\"allocated_amount\"])\n\tnew_row.set(\n\t\t\"debit\" if d[\"dr_or_cr\"] == \"debit_in_account_currency\" else \"credit\",\n\t\td[\"allocated_amount\"] * flt(jv_detail.exchange_rate),\n\t)\n\n\tnew_row.set(\n\t\t\"credit_in_account_currency\"\n\t\tif d[\"dr_or_cr\"] == \"debit_in_account_currency\"\n\t\telse \"debit_in_account_currency\",\n\t\t0,\n\t)\n\tnew_row.set(\"credit\" if d[\"dr_or_cr\"] == \"debit_in_account_currency\" else \"debit\", 0)\n\n\tnew_row.set(\"reference_type\", d[\"against_voucher_type\"])\n\tnew_row.set(\"reference_name\", d[\"against_voucher\"])\n\n\tnew_row.against_account = cstr(jv_detail.against_account)\n\tnew_row.is_advance = cstr(jv_detail.is_advance)\n\tnew_row.docstatus = 1\n\n\t# will work as update after submit\n\tjournal_entry.flags.ignore_validate_update_after_submit = True\n\tif not do_not_save:\n\t\tjournal_entry.save(ignore_permissions=True)\n\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 75, + "n_words": 112, + "vocab_size": 76, + 
"complexity": 7, + "nloc": 34, + "token_counts": 283, + "n_ast_nodes": 495, + "n_identifiers": 26, + "d_id": 13890, + "documentation": { + "docstring": "\n\tUpdates against document, if partial amount splits into rows\n\t", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 135567, + "commit_id": "d329147ae28c57b290f6b932f9f3044523f67c4e", + "repo": "ray", + "path": "rllib/utils/tests/test_actor_manager.py", + "file_name": "test_actor_manager.py", + "fun_name": "test_sync_call_healthy_only", + "commit_message": "[RLlib] Introduce FaultTolerantActorManager (#29703)\n\nSigned-off-by: Jun Gong ", + "code": "def test_sync_call_healthy_only(self):\n \n actors = [Actor.remote(i) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n results = []\n for _ in range(10):\n results.extend(\n manager.foreach_actor(\n lambda w: w.call(), healthy_only=True\n ).ignore_errors()\n )\n # Wait for actors to recover.\n wait_for_restore()\n\n # Notice that since we only fire calls against healthy actors,\n # we wouldn't be aware that the actors have been recovered.\n # So once an actor is taken out of the lineup (10% chance),\n # it will not go back in, and we should have few results here.\n # Basically takes us 10 calls to kill all the actors.\n # Note that we can hardcode 10 here because we are using deterministic\n # sequences of random numbers.\n self.assertEqual(len(results), 10)\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 298, + "n_words": 114, + "vocab_size": 86, + "complexity": 3, + "nloc": 12, + "token_counts": 83, + "n_ast_nodes": 144, + "n_identifiers": 20, + "d_id": 30658, + "documentation": { + "docstring": "Test synchronous remote calls to only healthy actors.", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 71720, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/admin/tests/pages/test_revisions.py", + "file_name": "test_revisions.py", + "fun_name": "test_base_form_class_used", + "commit_message": "Reformat with black", + "code": "def test_base_form_class_used(self):\n \n edit_url = reverse(\n \"wagtailadmin_pages:add\",\n args=(\"tests\", \"formclassadditionalfieldpage\", self.test_page.id),\n )\n response = self.client.get(edit_url)\n self.assertContains(\n response,\n '',\n html=True,\n )\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 121, + "n_words": 24, + "vocab_size": 22, + "complexity": 1, + "nloc": 11, + "token_counts": 50, + "n_ast_nodes": 84, + "n_identifiers": 12, + "d_id": 15715, + "documentation": { + "docstring": "First ensure that the non-model field is appearing in edit.", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 257346, + "commit_id": "f6e3a639063887f9f5b27f574a04c7fe602b3185", + "repo": "haystack", + "path": "haystack/pipelines/base.py", + "file_name": "base.py", + "fun_name": "components", + "commit_message": "Prevent losing names of utilized components when loaded from config (#2525)\n\n* Prevent losing names of utilized components when loaded from config\r\n\r\n* Update Documentation & Code Style\r\n\r\n* update test\r\n\r\n* fix failing tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix even more tests\r\n\r\n* Update 
Documentation & Code Style\r\n\r\n* incorporate review feedback\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", + "code": "def components(self) -> Dict[str, BaseComponent]:\n \n all_components = self._find_all_components()\n return {component.name: component for component in all_components if component.name is not None}\n", + "url": "https://github.com/deepset-ai/haystack.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 41, + "n_words": 20, + "vocab_size": 18, + "complexity": 3, + "nloc": 7, + "token_counts": 39, + "n_ast_nodes": 61, + "n_identifiers": 9, + "d_id": 75070, + "documentation": { + "docstring": "\n Returns all components used by this pipeline.\n Note that this also includes such components that are being utilized by other components only and are not being used as a pipeline node directly.\n ", + "n_words": 32, + "vocab_size": 24, + "n_whitespaces": 54, + "language": "en" + } + }, + { + "id": 163213, + "commit_id": "b5c6e4713ae4397cd047cb41f11aca4d27fb6096", + "repo": "pandas", + "path": "pandas/core/indexing.py", + "file_name": "indexing.py", + "fun_name": "_ensure_iterable_column_indexer", + "commit_message": "CLN: suppress warnings (#45212)", + "code": "def _ensure_iterable_column_indexer(self, column_indexer):\n \n ilocs: Sequence[int]\n if is_integer(column_indexer):\n ilocs = [column_indexer]\n elif isinstance(column_indexer, slice):\n ilocs = np.arange(len(self.obj.columns))[column_indexer]\n elif isinstance(column_indexer, np.ndarray) and is_bool_dtype(\n column_indexer.dtype\n ):\n ilocs = np.arange(len(column_indexer))[column_indexer]\n else:\n ilocs = column_indexer\n return ilocs\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 143, + "n_words": 32, + "vocab_size": 23, + "complexity": 5, + "nloc": 13, + "token_counts": 89, + "n_ast_nodes": 143, + "n_identifiers": 17, + "d_id": 39403, + "documentation": { + "docstring": "\n Ensure that our column indexer is something that can be iterated over.\n ", + "n_words": 12, + "vocab_size": 11, + "n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 168252, + "commit_id": "2f8d0a36703e81e4dca52ca9fe4f58c910c1b304", + "repo": "pandas", + "path": "pandas/core/indexes/range.py", + "file_name": "range.py", + "fun_name": "_start", + "commit_message": "PERF cache find_stack_level (#48023)\n\ncache stacklevel", + "code": "def _start(self) -> int:\n \n warnings.warn(\n self._deprecation_message.format(\"_start\", \"start\"),\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n return self.start\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 73, + "n_words": 12, + "vocab_size": 12, + "complexity": 1, + "nloc": 13, + "token_counts": 41, + "n_ast_nodes": 69, + "n_identifiers": 13, + "d_id": 40259, + "documentation": { + "docstring": "\n The value of the `start` parameter (``0`` if this was not supplied).\n\n .. 
deprecated:: 0.25.0\n Use ``start`` instead.\n ", + "n_words": 18, + "vocab_size": 18, + "n_whitespaces": 52, + "language": "en" + } + }, + { + "id": 247957, + "commit_id": "2e2d8cc2f9b9af5f8b48d75e22c474e08feca236", + "repo": "synapse", + "path": "tests/rest/admin/test_server_notice.py", + "file_name": "test_server_notice.py", + "fun_name": "test_update_notice_user_name_when_changed", + "commit_message": "Update the server notices user profile in room if changed. (#12115)", + "code": "def test_update_notice_user_name_when_changed(self) -> None:\n \n server_notice_request_content = {\n \"user_id\": self.other_user,\n \"content\": {\"msgtype\": \"m.text\", \"body\": \"test msg one\"},\n }\n\n self.make_request(\n \"POST\",\n self.url,\n access_token=self.admin_user_tok,\n content=server_notice_request_content,\n )\n\n # simulate a change in server config after a server restart.\n new_display_name = \"new display name\"\n self.server_notices_manager._config.servernotices.server_notices_mxid_display_name = (\n new_display_name\n )\n self.server_notices_manager.get_or_create_notice_room_for_user.cache.invalidate_all()\n\n self.make_request(\n \"POST\",\n self.url,\n access_token=self.admin_user_tok,\n content=server_notice_request_content,\n )\n\n invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)\n notice_room_id = invited_rooms[0].room_id\n self.helper.join(\n room=notice_room_id, user=self.other_user, tok=self.other_user_token\n )\n\n notice_user_state_in_room = self.helper.get_state(\n notice_room_id,\n \"m.room.member\",\n self.other_user_token,\n state_key=\"@notices:test\",\n )\n self.assertEqual(notice_user_state_in_room[\"displayname\"], new_display_name)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 383, + "n_words": 74, + "vocab_size": 57, + "complexity": 1, + "nloc": 38, + "token_counts": 175, + "n_ast_nodes": 282, + "n_identifiers": 31, + "d_id": 72024, + "documentation": { + "docstring": "\n Tests that existing server notices user name in room is updated after\n server notice config changes.\n ", + "n_words": 16, + "vocab_size": 15, + "n_whitespaces": 38, + "language": "en" + } + }, + { + "id": 249764, + "commit_id": "6a6e1e8c0711939338f25d8d41d1e4d33d984949", + "repo": "synapse", + "path": "tests/rest/client/test_rooms.py", + "file_name": "test_rooms.py", + "fun_name": "_create_basic_room", + "commit_message": "Fix room creation being rate limited too aggressively since Synapse v1.69.0. 
(#14314)\n\n* Introduce a test for the old behaviour which we want to restore\r\n\r\n* Reintroduce the old behaviour in a simpler way\r\n\r\n* Newsfile\r\n\r\nSigned-off-by: Olivier Wilkinson (reivilibre) \r\n\r\n* Use 1 credit instead of 2 for creating a room: be more lenient than before\r\n\r\nNotably, the UI in Element Web was still broken after restoring to prior behaviour.\r\n\r\nAfter discussion, we agreed that it would be sensible to increase the limit.\r\n\r\nSigned-off-by: Olivier Wilkinson (reivilibre) ", + "code": "def _create_basic_room(self) -> Tuple[int, object]:\n \n channel = self.make_request(\n \"POST\",\n \"/createRoom\",\n {},\n )\n return channel.code, channel.json_body\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 76, + "n_words": 15, + "vocab_size": 15, + "complexity": 1, + "nloc": 10, + "token_counts": 35, + "n_ast_nodes": 58, + "n_identifiers": 9, + "d_id": 73110, + "documentation": { + "docstring": "\n Tries to create a basic room and returns the response code.\n ", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 26, + "language": "en" + } + }, + { + "id": 155711, + "commit_id": "5e8a4813cf948250608b16747773a7dc52088eb6", + "repo": "dask", + "path": "dask/dataframe/methods.py", + "file_name": "methods.py", + "fun_name": "boundary_slice", + "commit_message": "Deprecate `is_monotonic` (#8653)\n\nThis PR deprecates `is_monotonic` to follow what `pandas` is doing upstream. This also resolves some test failures in our `upstream` build", + "code": "def boundary_slice(df, start, stop, right_boundary=True, left_boundary=True, kind=None):\n \n if len(df.index) == 0:\n return df\n\n if PANDAS_GT_131:\n if kind is not None:\n warnings.warn(\n \"The `kind` argument is no longer used/supported. \"\n \"It will be dropped in a future release.\",\n category=FutureWarning,\n )\n kind_opts = {}\n kind = \"loc\"\n else:\n kind = kind or \"loc\"\n kind_opts = {\"kind\": kind}\n\n if kind == \"loc\" and not df.index.is_monotonic_increasing:\n # Pandas treats missing keys differently for label-slicing\n # on monotonic vs. non-monotonic indexes\n # If the index is monotonic, `df.loc[start:stop]` is fine.\n # If it's not, `df.loc[start:stop]` raises when `start` is missing\n if start is not None:\n if left_boundary:\n df = df[df.index >= start]\n else:\n df = df[df.index > start]\n if stop is not None:\n if right_boundary:\n df = df[df.index <= stop]\n else:\n df = df[df.index < stop]\n return df\n\n result = getattr(df, kind)[start:stop]\n if not right_boundary and stop is not None:\n right_index = result.index.get_slice_bound(stop, \"left\", **kind_opts)\n result = result.iloc[:right_index]\n if not left_boundary and start is not None:\n left_index = result.index.get_slice_bound(start, \"right\", **kind_opts)\n result = result.iloc[left_index:]\n return result\n\n", + "url": "https://github.com/dask/dask.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 488, + "n_words": 171, + "vocab_size": 95, + "complexity": 15, + "nloc": 35, + "token_counts": 233, + "n_ast_nodes": 378, + "n_identifiers": 22, + "d_id": 36453, + "documentation": { + "docstring": "Index slice start/stop. 
Can switch include/exclude boundaries.\n\n Examples\n --------\n >>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])\n >>> boundary_slice(df, 2, None)\n x\n 2 20\n 2 30\n 3 40\n 4 50\n >>> boundary_slice(df, 1, 3)\n x\n 1 10\n 2 20\n 2 30\n 3 40\n >>> boundary_slice(df, 1, 3, right_boundary=False)\n x\n 1 10\n 2 20\n 2 30\n\n Empty input DataFrames are returned\n\n >>> df_empty = pd.DataFrame()\n >>> boundary_slice(df_empty, 1, 3)\n Empty DataFrame\n Columns: []\n Index: []\n ", + "n_words": 80, + "vocab_size": 49, + "n_whitespaces": 184, + "language": "en" + } + }, + { + "id": 53859, + "commit_id": "f97603bba836c215e153d7d3d5b3b9de4d0ae822", + "repo": "prefect", + "path": "src/prefect/task_runners.py", + "file_name": "task_runners.py", + "fun_name": "_ray", + "commit_message": "First draft `RayTaskRunner` implementation", + "code": "def _ray(self) -> \"ray\":\n \n global ray\n\n if ray is None:\n try:\n import ray\n except ImportError as exc:\n raise RuntimeError(\n \"Using the `RayTaskRunner` requires `ray` to be installed.\"\n ) from exc\n\n return ray\n", + "url": "https://github.com/PrefectHQ/prefect.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 146, + "n_words": 32, + "vocab_size": 29, + "complexity": 3, + "nloc": 14, + "token_counts": 33, + "n_ast_nodes": 61, + "n_identifiers": 6, + "d_id": 10926, + "documentation": { + "docstring": "\n Delayed import of `ray` allowing configuration of the task runner\n without the extra installed and improves `prefect` import times.\n ", + "n_words": 19, + "vocab_size": 16, + "n_whitespaces": 41, + "language": "en" + } + }, + { + "id": 27993, + "commit_id": "5d1a36b9aaf408016957db04f86397b2e53c2500", + "repo": "saleor", + "path": "saleor/thumbnail/utils.py", + "file_name": "utils.py", + "fun_name": "preprocess_WEBP", + "commit_message": "Better media thumbnails including WebP support (#9988)\n\n* Add thumbnail app\r\n\r\n* Update get_thumbnail_size method and add tests\r\n\r\n* Add logic for creating thumbnails\r\n\r\n* Update logic for getting thumbnail\r\n\r\n* Allow defining format for tumbnail generation\r\n\r\n* Clear handle_thumbnail views\r\n\r\n* Add prepare_image_proxy_url method\r\n\r\n* Use ImageField for user avatar\r\n\r\n* Allow defining thumbnail format when querying user avatar\r\n\r\n* Use ImageField for category backgound_image\r\n\r\n* Use ImageField for Collection backgound_image\r\n\r\n* Use ImageField for ProductMedia image\r\n\r\n* Ensure that thumbnails are deleted when category background_image is changed or deleted\r\n\r\n* Ensure that thumbnails are deleted when collection background_image is changed or deleted\r\n\r\n* Update product media deleteion task and failing tests\r\n\r\n* Delete thumbnail from storage when thumbnail objects is deleted\r\n\r\n* Fix import in product test_bulk_delete\r\n\r\n* Drop create_thumbnails command\r\n\r\n* Update Product.thumbnail resolver\r\n\r\n* Update OrderLine thumbnail resolver\r\n\r\n* Add missing ADDED_IN_35 and PREVIEW_FEATURE labels\r\n\r\n* Update account and product signals - ensure the image is deleted from storage\r\n\r\n* Refactor product_images methods\r\n\r\n* Add signal for product media image delete\r\n\r\n* Drop create_thumbnails method and not longer valid settings fields\r\n\r\n* Clean the ProcessedImage class\r\n\r\n* Drop versatileimagefield from INSTALLED_APPS\r\n\r\n* Update changelog\r\n\r\n* Drop comments from ThumbnailFormat\r\n\r\n* Add get_image_or_proxy_url method\r\n\r\n* 
Apply reiew suggestions - add ThumbnailField and use get_image_or_proxy_ur when it's possible\r\n\r\n* Update changelog\r\n\r\n* Replace ADDED_IN_35 with ADDED_IN_36 label\r\n\r\n* Update changelog\r\n\r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>", + "code": "def preprocess_WEBP(self, image):\n \n save_kwargs = {\n \"quality\": self.WEBP_QUAL,\n \"lossless\": self.LOSSLESS_WEBP,\n \"icc_profile\": image.info.get(\"icc_profile\", \"\"),\n }\n\n return (image, save_kwargs)\n", + "url": "https://github.com/saleor/saleor.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 78, + "n_words": 17, + "vocab_size": 17, + "complexity": 1, + "nloc": 7, + "token_counts": 43, + "n_ast_nodes": 74, + "n_identifiers": 8, + "d_id": 5150, + "documentation": { + "docstring": "Receive a PIL Image instance of a WEBP and return 2-tuple.", + "n_words": 11, + "vocab_size": 10, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 199697, + "commit_id": "d1d46df73ebaad94089847558d00a8b7269f554d", + "repo": "sympy", + "path": "sympy/polys/orthopolys.py", + "file_name": "orthopolys.py", + "fun_name": "spherical_bessel_fn", + "commit_message": "Run orthopolys and appellseqs through a common interface\n\nIncluding unifying the two Chebyshev generators into one function.\nThere are also two kinds of Hermite polynomials, and they too share the\nsame recurrence, but the second type He_n(x) (aka the probabilist,\nreduced or small polynomials) will not be added here.", + "code": "def spherical_bessel_fn(n, x=None, polys=False):\n \n if x is None:\n x = Dummy(\"x\")\n f = dup_spherical_bessel_fn_minus if n < 0 else dup_spherical_bessel_fn\n return named_poly(abs(n), f, ZZ, \"\", (QQ(1)/x,), polys)\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 46, + "n_words": 27, + "vocab_size": 24, + "complexity": 3, + "nloc": 5, + "token_counts": 60, + "n_ast_nodes": 93, + "n_identifiers": 12, + "d_id": 49353, + "documentation": { + "docstring": "\n Coefficients for the spherical Bessel functions.\n\n These are only needed in the jn() function.\n\n The coefficients are calculated from:\n\n fn(0, z) = 1/z\n fn(1, z) = 1/z**2\n fn(n-1, z) + fn(n+1, z) == (2*n+1)/z * fn(n, z)\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n\n Examples\n ========\n\n >>> from sympy.polys.orthopolys import spherical_bessel_fn as fn\n >>> from sympy import Symbol\n >>> z = Symbol(\"z\")\n >>> fn(1, z)\n z**(-2)\n >>> fn(2, z)\n -1/z + 3/z**3\n >>> fn(3, z)\n -6/z**2 + 15/z**4\n >>> fn(4, z)\n 1/z - 45/z**3 + 105/z**5\n\n ", + "n_words": 105, + "vocab_size": 75, + "n_whitespaces": 195, + "language": "en" + } + }, + { + "id": 321054, + "commit_id": "d47cfd99d7374a41b4c228c21221c7850e65d0b1", + "repo": "qutebrowser", + "path": "qutebrowser/misc/backendproblem.py", + "file_name": "backendproblem.py", + "fun_name": "_try_import_backends", + "commit_message": "Run scripts/dev/rewrite_qt_imports.sh", + "code": "def _try_import_backends(self) -> _BackendImports:\n \n # pylint: disable=unused-import\n results = _BackendImports()\n\n try:\n from qutebrowser.qt import webkit as QtWebKit\n from qutebrowser.qt.webkit import qWebKitVersion\n from qutebrowser.qt import webkit as QtWebKitWidgets\n except (ImportError, ValueError) as e:\n 
results.webkit_error = str(e)\n else:\n if not qtutils.is_new_qtwebkit():\n results.webkit_error = \"Unsupported legacy QtWebKit found\"\n\n try:\n from qutebrowser.qt import webengine as QtWebEngineWidgets\n except (ImportError, ValueError) as e:\n results.webengine_error = str(e)\n\n return results\n", + "url": "https://github.com/qutebrowser/qutebrowser.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 217, + "n_words": 62, + "vocab_size": 37, + "complexity": 4, + "nloc": 17, + "token_counts": 100, + "n_ast_nodes": 166, + "n_identifiers": 20, + "d_id": 117494, + "documentation": { + "docstring": "Check whether backends can be imported and return BackendImports.", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 305503, + "commit_id": "1bc8770b51658f0dc1bd076b392d70be5a7433bc", + "repo": "core", + "path": "tests/components/google_assistant/test_smart_home.py", + "file_name": "test_smart_home.py", + "fun_name": "test_sync_in_area", + "commit_message": "Remove area_id from entity_registry.async_get_or_create (#77700)\n\n* Remove area_id from entity_registry.async_get_or_create\r\n\r\n* Adjust tests\r\n\r\n* Fix lying comment in test", + "code": "async def test_sync_in_area(area_on_device, hass, registries):\n \n area = registries.area.async_create(\"Living Room\")\n\n device = registries.device.async_get_or_create(\n config_entry_id=\"1234\",\n manufacturer=\"Someone\",\n model=\"Some model\",\n sw_version=\"Some Version\",\n connections={(device_registry.CONNECTION_NETWORK_MAC, \"12:34:56:AB:CD:EF\")},\n )\n registries.device.async_update_device(\n device.id, area_id=area.id if area_on_device else None\n )\n\n entity = registries.entity.async_get_or_create(\n \"light\",\n \"test\",\n \"1235\",\n suggested_object_id=\"demo_light\",\n device_id=device.id,\n )\n entity = registries.entity.async_update_entity(\n entity.entity_id, area_id=area.id if not area_on_device else None\n )\n\n light = DemoLight(\n None,\n \"Demo Light\",\n state=False,\n hs_color=(180, 75),\n effect_list=LIGHT_EFFECT_LIST,\n effect=LIGHT_EFFECT_LIST[0],\n )\n light.hass = hass\n light.entity_id = entity.entity_id\n await light.async_update_ha_state()\n\n config = MockConfig(should_expose=lambda _: True, entity_config={})\n\n events = async_capture_events(hass, EVENT_SYNC_RECEIVED)\n\n result = await sh.async_handle_message(\n hass,\n config,\n \"test-agent\",\n {\"requestId\": REQ_ID, \"inputs\": [{\"intent\": \"action.devices.SYNC\"}]},\n const.SOURCE_CLOUD,\n )\n\n assert result == {\n \"requestId\": REQ_ID,\n \"payload\": {\n \"agentUserId\": \"test-agent\",\n \"devices\": [\n {\n \"id\": \"light.demo_light\",\n \"name\": {\"name\": \"Demo Light\"},\n \"traits\": [\n trait.TRAIT_BRIGHTNESS,\n trait.TRAIT_ONOFF,\n trait.TRAIT_COLOR_SETTING,\n trait.TRAIT_MODES,\n ],\n \"type\": const.TYPE_LIGHT,\n \"willReportState\": False,\n \"attributes\": {\n \"availableModes\": [\n {\n \"name\": \"effect\",\n \"name_values\": [\n {\"lang\": \"en\", \"name_synonym\": [\"effect\"]}\n ],\n \"ordered\": False,\n \"settings\": [\n {\n \"setting_name\": \"rainbow\",\n \"setting_values\": [\n {\n \"lang\": \"en\",\n \"setting_synonym\": [\"rainbow\"],\n }\n ],\n },\n {\n \"setting_name\": \"none\",\n \"setting_values\": [\n {\"lang\": \"en\", \"setting_synonym\": [\"none\"]}\n ],\n },\n ],\n }\n ],\n \"colorModel\": \"hsv\",\n \"colorTemperatureRange\": {\n \"temperatureMinK\": 2000,\n \"temperatureMaxK\": 6535,\n },\n },\n 
\"deviceInfo\": {\n \"manufacturer\": \"Someone\",\n \"model\": \"Some model\",\n \"swVersion\": \"Some Version\",\n },\n \"roomHint\": \"Living Room\",\n }\n ],\n },\n }\n await hass.async_block_till_done()\n\n assert len(events) == 1\n assert events[0].event_type == EVENT_SYNC_RECEIVED\n assert events[0].data == {\"request_id\": REQ_ID, \"source\": \"cloud\"}\n\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 26, + "n_whitespaces": 1944, + "n_words": 213, + "vocab_size": 139, + "complexity": 3, + "nloc": 105, + "token_counts": 466, + "n_ast_nodes": 798, + "n_identifiers": 55, + "d_id": 104292, + "documentation": { + "docstring": "Test a sync message where room hint comes from area.", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 74901, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/documents/views/chooser.py", + "file_name": "chooser.py", + "fun_name": "get_document_chosen_response", + "commit_message": "Reformat with black", + "code": "def get_document_chosen_response(request, document):\n \n return render_modal_workflow(\n request,\n None,\n None,\n None,\n json_data={\n \"step\": \"document_chosen\",\n \"result\": {\n \"id\": document.id,\n \"title\": document.title,\n \"url\": document.url,\n \"filename\": document.filename,\n \"edit_link\": reverse(\"wagtaildocs:edit\", args=(document.id,)),\n },\n },\n )\n\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 187, + "n_words": 28, + "vocab_size": 25, + "complexity": 1, + "nloc": 17, + "token_counts": 74, + "n_ast_nodes": 121, + "n_identifiers": 11, + "d_id": 16329, + "documentation": { + "docstring": "\n helper function: given a document, return the modal workflow response that returns that\n document back to the calling page\n ", + "n_words": 19, + "vocab_size": 17, + "n_whitespaces": 29, + "language": "en" + } + }, + { + "id": 63798, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_vendor/tenacity/__init__.py", + "file_name": "__init__.py", + "fun_name": "call", + "commit_message": "upd; format", + "code": "def call(self, *args, **kwargs):\n \n warnings.warn(\n \"'call()' method is deprecated. 
\" + \"Use '__call__()' instead\",\n DeprecationWarning,\n )\n return self.__call__(*args, **kwargs)\n\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 69, + "n_words": 19, + "vocab_size": 19, + "complexity": 1, + "nloc": 6, + "token_counts": 34, + "n_ast_nodes": 58, + "n_identifiers": 8, + "d_id": 13507, + "documentation": { + "docstring": "Use ``__call__`` instead because this method is deprecated.", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 64559, + "commit_id": "fe4b6771b5fd935ed278cf553c864a18e3356a33", + "repo": "erpnext", + "path": "erpnext/regional/report/irs_1099/irs_1099.py", + "file_name": "irs_1099.py", + "fun_name": "execute", + "commit_message": "refactor: Remove dead code (#30140)", + "code": "def execute(filters=None):\n\tfilters = filters if isinstance(filters, frappe._dict) else frappe._dict(filters)\n\tif not filters:\n\t\tfilters.setdefault('fiscal_year', get_fiscal_year(nowdate())[0])\n\t\tfilters.setdefault('company', frappe.db.get_default(\"company\"))\n\n\tregion = frappe.db.get_value(\"Company\",\n\t\tfilters={\"name\": filters.company},\n\t\tfieldname=[\"country\"])\n\n\tif region != 'United States':\n\t\treturn [], []\n\n\tcolumns = get_columns()\n\tconditions = \"\"\n\tif filters.supplier_group:\n\t\tconditions += \"AND s.supplier_group = %s\" %frappe.db.escape(filters.get(\"supplier_group\"))\n\n\tdata = frappe.db.sql(.format(conditions=conditions), {\n\t\t\t\t\"fiscal_year\": filters.fiscal_year,\n\t\t\t\t\"company\": filters.company\n\t\t\t}, as_dict=True)\n\n\treturn columns, data\n\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 40, + "n_words": 59, + "vocab_size": 46, + "complexity": 5, + "nloc": 41, + "token_counts": 171, + "n_ast_nodes": 283, + "n_identifiers": 25, + "d_id": 13666, + "documentation": { + "docstring": "\n\t\tSELECT\n\t\t\ts.supplier_group as \"supplier_group\",\n\t\t\tgl.party AS \"supplier\",\n\t\t\ts.tax_id as \"tax_id\",\n\t\t\tSUM(gl.debit_in_account_currency) AS \"payments\"\n\t\tFROM\n\t\t\t`tabGL Entry` gl\n\t\t\t\tINNER JOIN `tabSupplier` s\n\t\tWHERE\n\t\t\ts.name = gl.party\n\t\t\t\tAND s.irs_1099 = 1\n\t\t\t\tAND gl.fiscal_year = %(fiscal_year)s\n\t\t\t\tAND gl.party_type = \"Supplier\"\n\t\t\t\tAND gl.company = %(company)s\n\t\t\t\t{conditions}\n\n\t\tGROUP BY\n\t\t\tgl.party\n\n\t\tORDER BY\n\t\t\tgl.party DESC", + "n_words": 49, + "vocab_size": 36, + "n_whitespaces": 30, + "language": "en" + } + }, + { + "id": 65944, + "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", + "repo": "erpnext", + "path": "erpnext/education/report/student_batch_wise_attendance/student_batch_wise_attendance.py", + "file_name": "student_batch_wise_attendance.py", + "fun_name": "get_student_group_strength", + "commit_message": "style: format code with black", + "code": "def get_student_group_strength(student_group):\n\tstudent_group_strength = frappe.db.sql(\n\t\t,\n\t\tstudent_group,\n\t)[0][0]\n\treturn student_group_strength\n\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 4, + "n_words": 10, + "vocab_size": 9, + "complexity": 1, + "nloc": 7, + "token_counts": 26, + "n_ast_nodes": 41, + "n_identifiers": 6, + "d_id": 14064, + "documentation": { + 
"docstring": "select count(*) from `tabStudent Group Student`\n\t\twhere parent = %s and active=1", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 101569, + "commit_id": "7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5", + "repo": "faceswap", + "path": "lib/training/preview_tk.py", + "file_name": "preview_tk.py", + "fun_name": "source", + "commit_message": "Training - Use custom preview pop-out", + "code": "def source(self) -> \"np.ndarray\":\n \n assert self._source is not None\n return self._source\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 7, + "n_whitespaces": 32, + "n_words": 11, + "vocab_size": 10, + "complexity": 1, + "nloc": 4, + "token_counts": 19, + "n_ast_nodes": 34, + "n_identifiers": 3, + "d_id": 20979, + "documentation": { + "docstring": " :class:`PIL.Image.Image`: The current source preview image ", + "n_words": 6, + "vocab_size": 6, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 108516, + "commit_id": "032316bc6c7798fca6c82de24167c975f237687f", + "repo": "matplotlib", + "path": "lib/matplotlib/pyplot.py", + "file_name": "pyplot.py", + "fun_name": "summer", + "commit_message": "Cleanup documentation generation for pyplot\n\n- remove the awkward `pyplot.plotting()` function, which only served\n as a namespace to take up the docs for pyplot and output them via\n `.. autofunction`\n- Instead generate the same information using `.. autosummary::`. We\n have to list the desired methods here explicitly. I've added a test\n that these are the same as previously auto-generated in the\n `plotting()` docstring. If we change anything in pyplot, we'll be\n notified through the test failure that we have to adapt the\n autosummary list.\n- Removed the docstring generation logic\n `_setup_pyplot_info_docstrings()`. Apart from generating the\n `plotting()` docstring, this added docstrings to the pyplot colormap\n setters. Instead, we now add these docstrings directly via\n boilerplate.py\n\nCo-authored-by: Elliott Sales de Andrade ", + "code": "def summer():\n \n set_cmap('summer')\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.", + "url": "https://github.com/matplotlib/matplotlib.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 21, + "n_words": 15, + "vocab_size": 15, + "complexity": 1, + "nloc": 2, + "token_counts": 9, + "n_ast_nodes": 22, + "n_identifiers": 2, + "d_id": 23227, + "documentation": { + "docstring": "\n Set the colormap to 'summer'.\n\n This changes the default colormap as well as the colormap of the current\n image if there is one. 
See ``help(colormaps)`` for more information.\n ", + "n_words": 28, + "vocab_size": 22, + "n_whitespaces": 41, + "language": "en" + } + }, + { + "id": 246168, + "commit_id": "901b264c0c88f39cbfb8b2229e0dc57968882658", + "repo": "synapse", + "path": "tests/rest/admin/test_user.py", + "file_name": "test_user.py", + "fun_name": "test_all_users", + "commit_message": "Add type hints to `tests/rest/admin` (#11851)", + "code": "def test_all_users(self) -> None:\n \n self._create_users(2)\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?deactivated=true\",\n {},\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertEqual(3, len(channel.json_body[\"users\"]))\n self.assertEqual(3, channel.json_body[\"total\"])\n\n # Check that all fields are available\n self._check_fields(channel.json_body[\"users\"])\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 137, + "n_words": 30, + "vocab_size": 29, + "complexity": 1, + "nloc": 15, + "token_counts": 96, + "n_ast_nodes": 157, + "n_identifiers": 16, + "d_id": 71062, + "documentation": { + "docstring": "\n List all users, including deactivated users.\n ", + "n_words": 6, + "vocab_size": 6, + "n_whitespaces": 21, + "language": "en" + } + }, + { + "id": 133619, + "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", + "repo": "ray", + "path": "release/tune_tests/cloud_tests/workloads/run_cloud_test.py", + "file_name": "run_cloud_test.py", + "fun_name": "test_ssh_sync", + "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", + "code": "def test_ssh_sync():\n \n experiment_name = \"cloud_ssh_sync\"\n indicator_file = f\"/tmp/{experiment_name}_indicator\"\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 17, + "n_words": 8, + "vocab_size": 7, + "complexity": 2, + "nloc": 16, + "token_counts": 70, + "n_ast_nodes": 30, + "n_identifiers": 3, + "d_id": 30062, + "documentation": { + "docstring": "\n SSH syncing, so:\n\n syncer=\"auto\"\n upload_dir=None\n\n Expected results after first checkpoint:\n\n - 4 trials are running\n - At least one trial ran on the head node\n - At least one trial ran remotely\n - Driver has trial checkpoints from head node trial\n - Driver has trial checkpoints from remote node trials\n - Remote trial dirs only have data for one trial\n - Remote trial dirs have checkpoints for node-local trials\n\n Then, remote checkpoint directories are cleaned up.\n\n Expected results after second checkpoint:\n\n - 4 trials are running\n - All trials progressed with training\n\n ", + "n_words": 92, + "vocab_size": 47, + "n_whitespaces": 185, + "language": "en" + } + }, + { + "id": 46884, + "commit_id": "91832a42d8124b040073481fd93c54e9e64c2609", + "repo": "airflow", + "path": "airflow/models/mappedoperator.py", + "file_name": "mappedoperator.py", + "fun_name": "parse_time_mapped_ti_count", + "commit_message": "Expand mapped tasks at DagRun.Veriy_integrity (#22679)\n\nCreate the necessary task instances for a mapped task at dagrun.verify_integrity\r\n\r\nCo-authored-by: Ash Berlin-Taylor ", + "code": "def parse_time_mapped_ti_count(self) -> Optional[int]:\n \n total = 0\n\n for value in self._get_expansion_kwargs().values():\n if not isinstance(value, MAPPABLE_LITERAL_TYPES):\n # 
None literal type encountered, so give up\n return None\n total += len(value)\n return total\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 110, + "n_words": 30, + "vocab_size": 26, + "complexity": 3, + "nloc": 13, + "token_counts": 46, + "n_ast_nodes": 77, + "n_identifiers": 11, + "d_id": 9032, + "documentation": { + "docstring": "\n Number of mapped TaskInstances that can be created at DagRun create time.\n\n :return: None if non-literal mapped arg encountered, or else total number of mapped TIs this task\n should have\n ", + "n_words": 30, + "vocab_size": 27, + "n_whitespaces": 63, + "language": "en" + } + }, + { + "id": 62143, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/util.py", + "file_name": "util.py", + "fun_name": "convert_path", + "commit_message": "upd; format", + "code": "def convert_path(pathname):\n \n if os.sep == '/':\n return pathname\n if not pathname:\n return pathname\n if pathname[0] == '/':\n raise ValueError(\"path '%s' cannot be absolute\" % pathname)\n if pathname[-1] == '/':\n raise ValueError(\"path '%s' cannot end with '/'\" % pathname)\n\n paths = pathname.split('/')\n while os.curdir in paths:\n paths.remove(os.curdir)\n if not paths:\n return os.curdir\n return os.path.join(*paths)\n\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 122, + "n_words": 53, + "vocab_size": 32, + "complexity": 7, + "nloc": 15, + "token_counts": 93, + "n_ast_nodes": 163, + "n_identifiers": 11, + "d_id": 12879, + "documentation": { + "docstring": "Return 'pathname' as a name that will work on the native filesystem.\n\n The path is split on '/' and put back together again using the current\n directory separator. Needed because filenames in the setup script are\n always supplied in Unix style, and have to be converted to the local\n convention before we can actually use them in the filesystem. 
Raises\n ValueError on non-Unix-ish systems if 'pathname' either starts or\n ends with a slash.\n ", + "n_words": 73, + "vocab_size": 60, + "n_whitespaces": 96, + "language": "en" + } + }, + { + "id": 298084, + "commit_id": "653805584bac4907ba6da527071bceb9d7313019", + "repo": "core", + "path": "homeassistant/components/lookin/coordinator.py", + "file_name": "coordinator.py", + "fun_name": "_async_update_data", + "commit_message": "Improve `lookin` generic typing (#84636)", + "code": "async def _async_update_data(self) -> _DataT:\n \n interval = self.update_interval\n if (\n interval is not None\n and self.last_update_success\n and self.data\n and self.push_coordinator.active(interval)\n ):\n data = self.data\n else:\n data = await super()._async_update_data()\n return data\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 139, + "n_words": 31, + "vocab_size": 23, + "complexity": 5, + "nloc": 13, + "token_counts": 57, + "n_ast_nodes": 96, + "n_identifiers": 10, + "d_id": 97031, + "documentation": { + "docstring": "Fetch data only if we have not received a push inside the interval.", + "n_words": 13, + "vocab_size": 13, + "n_whitespaces": 12, + "language": "en" + } + }, + { + "id": 110331, + "commit_id": "383de519505964ed879c40b23ef36e90c17ebe0d", + "repo": "matplotlib", + "path": "lib/matplotlib/backends/backend_ps.py", + "file_name": "backend_ps.py", + "fun_name": "gs_distill", + "commit_message": "[Doc] fix more spelling and grammar", + "code": "def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):\n \n\n if eps:\n paper_option = \"-dEPSCrop\"\n else:\n paper_option = \"-sPAPERSIZE=%s\" % ptype\n\n psfile = tmpfile + '.ps'\n dpi = mpl.rcParams['ps.distiller.res']\n\n cbook._check_and_log_subprocess(\n [mpl._get_executable_info(\"gs\").executable,\n \"-dBATCH\", \"-dNOPAUSE\", \"-r%d\" % dpi, \"-sDEVICE=ps2write\",\n paper_option, \"-sOutputFile=%s\" % psfile, tmpfile],\n _log)\n\n os.remove(tmpfile)\n shutil.move(psfile, tmpfile)\n\n # While it is best if above steps preserve the original bounding\n # box, there seem to be cases when it is not. For those cases,\n # the original bbox can be restored during the pstoeps step.\n\n if eps:\n # For some versions of gs, above steps result in a ps file where the\n # original bbox is no more correct. Do not adjust bbox for now.\n pstoeps(tmpfile, bbox, rotated=rotated)\n\n", + "url": "https://github.com/matplotlib/matplotlib.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 213, + "n_words": 112, + "vocab_size": 85, + "complexity": 3, + "nloc": 16, + "token_counts": 110, + "n_ast_nodes": 189, + "n_identifiers": 21, + "d_id": 24070, + "documentation": { + "docstring": "\n Use ghostscript's pswrite or epswrite device to distill a file.\n This yields smaller files without illegal encapsulated postscript\n operators. 
The output is low-level, converting text to outlines.\n ", + "n_words": 27, + "vocab_size": 26, + "n_whitespaces": 40, + "language": "en" + } + }, + { + "id": 222526, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/dis.py", + "file_name": "dis.py", + "fun_name": "_get_const_info", + "commit_message": "add python 3.10.4 for windows", + "code": "def _get_const_info(const_index, const_list):\n \n argval = const_index\n if const_list is not None:\n argval = const_list[const_index]\n return argval, repr(argval)\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 36, + "n_words": 17, + "vocab_size": 15, + "complexity": 2, + "nloc": 5, + "token_counts": 30, + "n_ast_nodes": 49, + "n_identifiers": 5, + "d_id": 56617, + "documentation": { + "docstring": "Helper to get optional details about const references\n\n Returns the dereferenced constant and its repr if the constant\n list is defined.\n Otherwise returns the constant index and its repr().\n ", + "n_words": 29, + "vocab_size": 23, + "n_whitespaces": 50, + "language": "en" + } + }, + { + "id": 200032, + "commit_id": "2ddc46704dffa81a5a2a8df4348bf98dff07ebd5", + "repo": "sympy", + "path": "sympy/physics/continuum_mechanics/truss.py", + "file_name": "truss.py", + "fun_name": "draw", + "commit_message": "subs_dict added to the draw method", + "code": "def draw(self, subs_dict=None):\n \n if not numpy:\n raise ImportError(\"To use this function numpy module is required\")\n\n x = Symbol('x')\n\n markers = []\n annotations = []\n rectangles = []\n\n node_markers = self._draw_nodes(subs_dict)\n markers += node_markers\n\n member_rectangles = self._draw_members()\n rectangles += member_rectangles\n\n support_markers = self._draw_supports()\n markers += support_markers\n\n load_annotations = self._draw_loads()\n annotations += load_annotations\n\n xmax = -INF\n xmin = INF\n ymax = -INF\n ymin = INF\n\n for node in list(self._node_coordinates):\n xmax = max(xmax, self._node_coordinates[node][0])\n xmin = min(xmin, self._node_coordinates[node][0])\n ymax = max(ymax, self._node_coordinates[node][1])\n ymin = min(ymin, self._node_coordinates[node][1])\n\n lim = max(xmax*1.1-xmin*0.8+1, ymax*1.1-ymin*0.8+1)\n\n if lim==xmax*1.1-xmin*0.8+1:\n sing_plot = plot(1, (x, 1, 1), markers=markers, show=False, annotations=annotations, xlim=(xmin-0.05*lim, xmax*1.1), ylim=(xmin-0.05*lim, xmax*1.1), axis=False, rectangles=rectangles)\n else:\n sing_plot = plot(1, (x, 1, 1), markers=markers, show=False, annotations=annotations, xlim=(ymin-0.05*lim, ymax*1.1), ylim=(ymin-0.05*lim, ymax*1.1), axis=False, rectangles=rectangles)\n\n return sing_plot\n\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 360, + "n_words": 122, + "vocab_size": 69, + "complexity": 4, + "nloc": 30, + "token_counts": 359, + "n_ast_nodes": 495, + "n_identifiers": 35, + "d_id": 49498, + "documentation": { + "docstring": "\n Returns a plot object of the Truss with all its nodes, members,\n supports and loads.\n\n .. note::\n The user must be careful while entering load values in their\n directions. 
The draw function assumes a sign convention that\n is used for plotting loads.\n\n Given a right-handed coordinate system with XYZ coordinates,\n the supports are assumed to be such that the reaction forces of a\n pinned support is in the +X and +Y direction while those of a\n roller support is in the +Y direction. For the load, the range\n of angles, one can input goes all the way to 360 degrees which, in the\n the plot is the angle that the load vector makes with the positive x-axis in the anticlockwise direction.\n\n For example, for a 90-degree angle, the load will be a vertically\n directed along +Y while a 270-degree angle denotes a vertical\n load as well but along -Y.\n\n Examples\n ========\n\n .. plot::\n :context: close-figs\n :format: doctest\n :include-source: True\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n >>> import math\n >>> t = Truss()\n >>> t.add_node(\"A\", -4, 0)\n >>> t.add_node(\"B\", 0, 0)\n >>> t.add_node(\"C\", 4, 0)\n >>> t.add_node(\"D\", 8, 0)\n >>> t.add_node(\"E\", 6, 2/math.sqrt(3))\n >>> t.add_node(\"F\", 2, 2*math.sqrt(3))\n >>> t.add_node(\"G\", -2, 2/math.sqrt(3))\n >>> t.add_member(\"AB\",\"A\",\"B\")\n >>> t.add_member(\"BC\",\"B\",\"C\")\n >>> t.add_member(\"CD\",\"C\",\"D\")\n >>> t.add_member(\"AG\",\"A\",\"G\")\n >>> t.add_member(\"GB\",\"G\",\"B\")\n >>> t.add_member(\"GF\",\"G\",\"F\")\n >>> t.add_member(\"BF\",\"B\",\"F\")\n >>> t.add_member(\"FC\",\"F\",\"C\")\n >>> t.add_member(\"CE\",\"C\",\"E\")\n >>> t.add_member(\"FE\",\"F\",\"E\")\n >>> t.add_member(\"DE\",\"D\",\"E\")\n >>> t.apply_support(\"A\",\"pinned\")\n >>> t.apply_support(\"D\",\"roller\")\n >>> t.apply_load(\"G\", 3, 90)\n >>> t.apply_load(\"E\", 3, 90)\n >>> t.apply_load(\"F\", 2, 90)\n >>> p = t.draw()\n >>> p\n Plot object containing:\n [0]: cartesian line: 1 for x over (1.0, 1.0)\n >>> p.show()\n ", + "n_words": 257, + "vocab_size": 156, + "n_whitespaces": 813, + "language": "en" + } + }, + { + "id": 31221, + "commit_id": "49becbaa5549b477b0d96c55f207614773c0ab42", + "repo": "transformers", + "path": "src/transformers/image_utils.py", + "file_name": "image_utils.py", + "fun_name": "center_crop", + "commit_message": "Enable crop_center method to handle (W, H, C) images (#17626)\n\n* enable crop_center method to handle (W, H, C) images\r\n\r\n* minor style and comment edits", + "code": "def center_crop(self, image, size):\n \n self._ensure_format_supported(image)\n\n if not isinstance(size, tuple):\n size = (size, size)\n\n # PIL Image.size is (width, height) but NumPy array and torch Tensors have (height, width)\n if is_torch_tensor(image) or isinstance(image, np.ndarray):\n if image.ndim == 2:\n image = self.expand_dims(image)\n image_shape = image.shape[1:] if image.shape[0] in [1, 3] else image.shape[:2]\n else:\n image_shape = (image.size[1], image.size[0])\n\n top = (image_shape[0] - size[0]) // 2\n bottom = top + size[0] # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result.\n left = (image_shape[1] - size[1]) // 2\n right = left + size[1] # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result.\n\n # For PIL Images we have a method to crop directly.\n if isinstance(image, PIL.Image.Image):\n return image.crop((left, top, right, bottom))\n\n # Check if image is in (n_channels, height, width) or (height, width, n_channels) format\n channel_first = True if image.shape[0] in [1, 3] else False\n\n # Transpose (height, width, n_channels) format images\n if not channel_first:\n if 
isinstance(image, np.ndarray):\n image = image.transpose(2, 0, 1)\n if is_torch_tensor(image):\n image = image.permute(2, 0, 1)\n\n # Check if cropped area is within image boundaries\n if top >= 0 and bottom <= image_shape[0] and left >= 0 and right <= image_shape[1]:\n return image[..., top:bottom, left:right]\n\n # Otherwise, we may need to pad if the image is too small. Oh joy...\n new_shape = image.shape[:-2] + (max(size[0], image_shape[0]), max(size[1], image_shape[1]))\n if isinstance(image, np.ndarray):\n new_image = np.zeros_like(image, shape=new_shape)\n elif is_torch_tensor(image):\n new_image = image.new_zeros(new_shape)\n\n top_pad = (new_shape[-2] - image_shape[0]) // 2\n bottom_pad = top_pad + image_shape[0]\n left_pad = (new_shape[-1] - image_shape[1]) // 2\n right_pad = left_pad + image_shape[1]\n new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image\n\n top += top_pad\n bottom += top_pad\n left += left_pad\n right += left_pad\n\n new_image = new_image[\n ..., max(0, top) : min(new_image.shape[-2], bottom), max(0, left) : min(new_image.shape[-1], right)\n ]\n\n return new_image\n", + "url": "https://github.com/huggingface/transformers.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 707, + "n_words": 301, + "vocab_size": 156, + "complexity": 17, + "nloc": 42, + "token_counts": 474, + "n_ast_nodes": 714, + "n_identifiers": 34, + "d_id": 5704, + "documentation": { + "docstring": "\n Crops `image` to the given size using a center crop. Note that if the image is too small to be cropped to the\n size given, it will be padded (so the returned result has the size asked).\n\n Args:\n image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape (n_channels, height, width) or (height, width, n_channels)):\n The image to resize.\n size (`int` or `Tuple[int, int]`):\n The size to which crop the image.\n\n Returns:\n new_image: A center cropped `PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape: (n_channels,\n height, width).\n ", + "n_words": 84, + "vocab_size": 55, + "n_whitespaces": 194, + "language": "en" + } + }, + { + "id": 270710, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/engine/base_layer.py", + "file_name": "base_layer.py", + "fun_name": "input", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def input(self):\n \n if not self._inbound_nodes:\n raise AttributeError(\n \"Layer \" + self.name + \" is not connected, no input to return.\"\n )\n return self._get_node_attribute_at_index(0, \"input_tensors\", \"input\")\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 83, + "n_words": 25, + "vocab_size": 22, + "complexity": 2, + "nloc": 6, + "token_counts": 34, + "n_ast_nodes": 63, + "n_identifiers": 6, + "d_id": 80534, + "documentation": { + "docstring": "Retrieves the input tensor(s) of a layer.\n\n Only applicable if the layer has exactly one input,\n i.e. 
if it is connected to one incoming layer.\n\n Returns:\n Input tensor or list of input tensors.\n\n Raises:\n RuntimeError: If called in Eager mode.\n AttributeError: If no inbound nodes are found.\n ", + "n_words": 47, + "vocab_size": 40, + "n_whitespaces": 111, + "language": "en" + } + }, + { + "id": 22541, + "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", + "repo": "Python", + "path": "XORcipher/XOR_cipher.py", + "file_name": "XOR_cipher.py", + "fun_name": "decrypt_file", + "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", + "code": "def decrypt_file(self, file, key):\n \n\n # precondition\n assert isinstance(file, str) and isinstance(key, int)\n\n try:\n with open(file, \"r\") as fin:\n with open(\"decrypt.out\", \"w+\") as fout:\n # actual encrypt-process\n for line in fin:\n fout.write(self.decrypt_string(line, key))\n\n except:\n return False\n\n return True\n\n\n# Tests\n# crypt = XORCipher()\n# key = 67\n\n# # test enrcypt\n# print crypt.encrypt(\"hallo welt\",key)\n# # test decrypt\n# print crypt.decrypt(crypt.encrypt(\"hallo welt\",key), key)\n\n# # test encrypt_string\n# print crypt.encrypt_string(\"hallo welt\",key)\n\n# # test decrypt_string\n# print crypt.decrypt_string(crypt.encrypt_string(\"hallo welt\",key),key)\n\n# if (crypt.encrypt_file(\"test.txt\",key)):\n# \tprint \"encrypt successful\"\n# else:\n# \tprint \"encrypt unsuccessful\"\n\n# if (crypt.decrypt_file(\"encrypt.out\",key)):\n# \tprint \"decrypt successful\"\n# else:\n# \tprint \"decrypt unsuccessful\"\n", + "url": "https://github.com/geekcomputers/Python.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 227, + "n_words": 106, + "vocab_size": 60, + "complexity": 4, + "nloc": 10, + "token_counts": 70, + "n_ast_nodes": 141, + "n_identifiers": 13, + "d_id": 4357, + "documentation": { + "docstring": "\n input: filename (str) and a key (int)\n output: returns true if decrypt process was\n successful otherwise false\n if key not passed the method uses the key by the constructor.\n otherwise key = 1\n ", + "n_words": 33, + "vocab_size": 26, + "n_whitespaces": 76, + "language": "en" + } + }, + { + "id": 248275, + "commit_id": "57f6c496d0e26b1b455de936bd950e1899a5ae25", + "repo": "synapse", + "path": "synapse/rest/media/v1/preview_url_resource.py", + "file_name": "preview_url_resource.py", + "fun_name": "_expire_url_cache_data", + "commit_message": "URL preview cache expiry logs: INFO -> DEBUG, text clarifications (#12720)", + "code": "async def _expire_url_cache_data(self) -> None:\n \n\n assert self._worker_run_media_background_jobs\n\n now = self.clock.time_msec()\n\n logger.debug(\"Running url preview cache expiry\")\n\n if not (await self.store.db_pool.updates.has_completed_background_updates()):\n logger.debug(\"Still running DB updates; skipping url preview cache expiry\")\n return\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 86, + "n_words": 29, + "vocab_size": 25, + "complexity": 12, + "nloc": 68, + "token_counts": 329, + "n_ast_nodes": 92, + "n_identifiers": 12, + "d_id": 72189, + "documentation": { + "docstring": "Clean up expired url cache content, media and thumbnails.", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 104387, + "commit_id": "e35be138148333078284b942ccc9ed7b1d826f97", + "repo": "datasets", + "path": "src/datasets/dataset_dict.py", + 
"file_name": "dataset_dict.py", + "fun_name": "reset_format", + "commit_message": "Update docs to new frontend/UI (#3690)\n\n* WIP: update docs to new UI\r\n\r\n* make style\r\n\r\n* Rm unused\r\n\r\n* inject_arrow_table_documentation __annotations__\r\n\r\n* hasattr(arrow_table_method, \"__annotations__\")\r\n\r\n* Update task_template.rst\r\n\r\n* Codeblock PT-TF-SPLIT\r\n\r\n* Convert loading scripts\r\n\r\n* Convert docs to mdx\r\n\r\n* Fix mdx\r\n\r\n* Add \r\n\r\n* Convert mdx tables\r\n\r\n* Fix codeblock\r\n\r\n* Rm unneded hashlinks\r\n\r\n* Update index.mdx\r\n\r\n* Redo dev change\r\n\r\n* Rm circle ci `build_doc` & `deploy_doc`\r\n\r\n* Rm unneeded files\r\n\r\n* Update docs reamde\r\n\r\n* Standardize to `Example::`\r\n\r\n* mdx logging levels doc\r\n\r\n* Table properties inject_arrow_table_documentation\r\n\r\n* ``` to ```py mdx\r\n\r\n* Add Tips mdx\r\n\r\n* important,None -> \r\n\r\n* More misc\r\n\r\n* Center imgs\r\n\r\n* Update instllation page\r\n\r\n* `setup.py` docs section\r\n\r\n* Rm imgs since they are in hf.co\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* Update index mdx\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* just `Dataset` obj\r\n\r\n* Addedversion just italics\r\n\r\n* Update ReadInstruction doc example syntax\r\n\r\n* Change docstring for `prepare_for_task`\r\n\r\n* Chore\r\n\r\n* Remove `code` syntax from headings\r\n\r\n* Rm `code` syntax from headings\r\n\r\n* Hashlink backward compatability\r\n\r\n* S3FileSystem doc\r\n\r\n* S3FileSystem doc updates\r\n\r\n* index.mdx updates\r\n\r\n* Add darkmode gifs\r\n\r\n* Index logo img css classes\r\n\r\n* Index mdx dataset logo img size\r\n\r\n* Docs for DownloadMode class\r\n\r\n* Doc DownloadMode table\r\n\r\n* format docstrings\r\n\r\n* style\r\n\r\n* Add doc builder scripts (#3790)\r\n\r\n* add doc builder scripts\r\n\r\n* fix docker image\r\n\r\n* Docs new UI actions no self hosted (#3793)\r\n\r\n* No self hosted\r\n\r\n* replace doc injection by actual docstrings\r\n\r\n* Docstring formatted\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Mishig Davaadorj \r\n\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Mishig Davaadorj \r\n\r\n* Rm notebooks from docs actions since they dont exi\r\n\r\n* Update tsting branch\r\n\r\n* More docstring\r\n\r\n* Chore\r\n\r\n* bump up node version\r\n\r\n* bump up node\r\n\r\n* ``` -> ```py for audio_process.mdx\r\n\r\n* Update .github/workflows/build_documentation.yml\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Uodate dev doc build\r\n\r\n* remove run on PR\r\n\r\n* fix action\r\n\r\n* Fix gh doc workflow\r\n\r\n* forgot this change when merging master\r\n\r\n* Update build doc\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Lysandre Debut ", + "code": "def reset_format(self):\n \n self._check_values_type()\n for dataset in self.values():\n dataset.set_format()\n", + "url": "https://github.com/huggingface/datasets.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 40, + "n_words": 8, + "vocab_size": 8, + "complexity": 2, + "nloc": 4, + "token_counts": 25, + "n_ast_nodes": 45, + "n_identifiers": 6, + "d_id": 21824, + 
"documentation": { + "docstring": "Reset ``__getitem__`` return format to python objects and all columns.\n The transformation is applied to all the datasets of the dataset dictionary.\n\n Same as ``self.set_format()``\n ", + "n_words": 25, + "vocab_size": 22, + "n_whitespaces": 46, + "language": "en" + } + }, + { + "id": 118633, + "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", + "repo": "streamlit", + "path": "lib/tests/streamlit/scriptrunner/script_runner_test.py", + "file_name": "script_runner_test.py", + "fun_name": "_assert_text_deltas", + "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", + "code": "def _assert_text_deltas(self, scriptrunner, text_deltas):\n \n self.assertEqual(text_deltas, scriptrunner.text_deltas())\n\n", + "url": "https://github.com/streamlit/streamlit.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 20, + "n_words": 6, + "vocab_size": 6, + "complexity": 1, + "nloc": 2, + "token_counts": 22, + "n_ast_nodes": 36, + "n_identifiers": 5, + "d_id": 26337, + "documentation": { + "docstring": "Asserts that the scriptrunner's ForwardMsgQueue contains text deltas\n with the given contents.\n\n Parameters\n ----------\n scriptrunner : TestScriptRunner\n text_deltas : List[str]\n\n ", + "n_words": 20, + "vocab_size": 18, + "n_whitespaces": 62, + "language": "en" + } + }, + { + "id": 156222, + "commit_id": "261bf174931580230717abca93fe172e166cc1e8", + "repo": "dask", + "path": "dask/utils.py", + "file_name": "utils.py", + "fun_name": "typename", + "commit_message": "Add mild typing to common utils functions (#8848)", + "code": "def typename(typ, short=False) -> str:\n \n if not isinstance(typ, type):\n return typename(type(typ))\n try:\n if not typ.__module__ or typ.__module__ == \"builtins\":\n return typ.__name__\n else:\n if short:\n module, *_ = typ.__module__.split(\".\")\n else:\n module = typ.__module__\n return module + \".\" + typ.__name__\n except AttributeError:\n return str(typ)\n\n", + "url": "https://github.com/dask/dask.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 156, + "n_words": 42, + "vocab_size": 29, + "complexity": 6, + "nloc": 28, + "token_counts": 88, + "n_ast_nodes": 150, + "n_identifiers": 12, + "d_id": 36605, + "documentation": { + "docstring": "\n Return the name of a type\n\n Examples\n --------\n >>> typename(int)\n 'int'\n\n >>> from dask.core import literal\n >>> typename(literal)\n 'dask.core.literal'\n >>> typename(literal, short=True)\n 'dask.literal'\n ", + "n_words": 23, + "vocab_size": 20, + "n_whitespaces": 57, + "language": "en" + } + }, + { + "id": 128150, + "commit_id": "9c39a28ba2f6221ffd8327fa21cb8294f0390fee", + "repo": "ray", + "path": "python/ray/data/tests/test_batch_mapper.py", + "file_name": "test_batch_mapper.py", + "fun_name": "test_batch_mapper_pandas_data_format", + "commit_message": "[AIR][Numpy] Add numpy narrow waist to `Preprocessor` and `BatchMapper` (#28418)\n\nCo-authored-by: Eric Liang \r\nCo-authored-by: Clark Zinzow \r\nCo-authored-by: Amog Kamsetty ", + "code": "def test_batch_mapper_pandas_data_format(ds_with_expected_pandas_numpy_df):\n \n ds, expected_df, expected_numpy_df = ds_with_expected_pandas_numpy_df\n", + "url": "https://github.com/ray-project/ray.git", + 
"language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 7, + "n_whitespaces": 13, + "n_words": 7, + "vocab_size": 7, + "complexity": 1, + "nloc": 20, + "token_counts": 145, + "n_ast_nodes": 23, + "n_identifiers": 5, + "d_id": 28613, + "documentation": { + "docstring": "Tests batch mapper functionality for pandas data format.\n\n Note:\n For single column pandas dataframes, we automatically convert it to\n single column tensor with column name as `__value__`.\n ", + "n_words": 27, + "vocab_size": 23, + "n_whitespaces": 47, + "language": "en" + } + }, + { + "id": 20845, + "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", + "repo": "pipenv", + "path": "pipenv/patched/notpip/_vendor/rich/syntax.py", + "file_name": "syntax.py", + "fun_name": "lexer", + "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", + "code": "def lexer(self) -> Optional[Lexer]:\n \n\n if isinstance(self._lexer, Lexer):\n return self._lexer\n try:\n return get_lexer_by_name(\n self._lexer,\n stripnl=False,\n ensurenl=True,\n tabsize=self.tab_size,\n )\n except ClassNotFound:\n return None\n", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 153, + "n_words": 21, + "vocab_size": 19, + "complexity": 3, + "nloc": 16, + "token_counts": 54, + "n_ast_nodes": 83, + "n_identifiers": 12, + "d_id": 3587, + "documentation": { + "docstring": "The lexer for this syntax, or None if no lexer was found.\n\n Tries to find the lexer by name if a string was passed to the constructor.\n ", + "n_words": 27, + "vocab_size": 21, + "n_whitespaces": 41, + "language": "en" + } + }, + { + "id": 125637, + "commit_id": "90cea203befa8f2e86e9c1c18bb3972296358e7b", + "repo": "ray", + "path": "python/ray/runtime_context.py", + "file_name": "runtime_context.py", + "fun_name": "get_placement_group_id", + "commit_message": "Ray 2.0 API deprecation (#26116)\n\nRay 2.0 API deprecation for:\r\n\r\n ray.remote(): placement_group\r\n ray.remote(): placement_group_bundle_index\r\n ray.remote(): placement_group_capture_child_tasks\r\n ray.get_dashboard_url()\r\n ray.get_resource_ids()\r\n ray.disconnect()\r\n ray.connect()\r\n ray.util.ActorGroup\r\n ray.util.ActorPool\r\n Add get_xx_id() to return hex (rather than object), and then deprecate the xx_id() (which returns Cython object): the xx here can be node, task etc.\r\n ray start: --plasma-store-socket-name\r\n ray start: --raylet-socket-name", + "code": "def get_placement_group_id(self) -> Optional[str]:\n \n pg_id = self.worker.placement_group_id\n return pg_id.hex() if not pg_id.is_nil() else None\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 35, + "n_words": 14, + "vocab_size": 14, + "complexity": 2, + "nloc": 8, + "token_counts": 33, + "n_ast_nodes": 55, + "n_identifiers": 9, + "d_id": 27934, + "documentation": { + 
"docstring": "Get the current Placement group ID of this worker.\n\n Returns:\n The current placement group id in hex format of this worker.\n ", + "n_words": 21, + "vocab_size": 16, + "n_whitespaces": 46, + "language": "en" + } + }, + { + "id": 160837, + "commit_id": "cafec60a5e28af98fb8798049edd7942720d2d74", + "repo": "numpy", + "path": "numpy/testing/tests/test_utils.py", + "file_name": "test_utils.py", + "fun_name": "test_array_vs_scalar_is_equal", + "commit_message": "ENH: Add strict parameter to assert_array_equal. (#21595)\n\nFixes #9542\r\n\r\nCo-authored-by: Bas van Beek <43369155+BvB93@users.noreply.github.com>", + "code": "def test_array_vs_scalar_is_equal(self):\n \n a = np.array([1., 1., 1.])\n b = 1.\n\n self._test_equal(a, b)\n", + "url": "https://github.com/numpy/numpy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 40, + "n_words": 12, + "vocab_size": 11, + "complexity": 1, + "nloc": 4, + "token_counts": 35, + "n_ast_nodes": 50, + "n_identifiers": 7, + "d_id": 38756, + "documentation": { + "docstring": "Test comparing an array with a scalar when all values are equal.", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 11, + "language": "en" + } + }, + { + "id": 189736, + "commit_id": "5b11a0e48b5564cdf02c11dd177f7f5c9f0b9f7a", + "repo": "manim", + "path": "manim/utils/tex_file_writing.py", + "file_name": "tex_file_writing.py", + "fun_name": "generate_tex_file", + "commit_message": "Improved Error in :mod:`.utils.tex_file_writing` (#2574)\n\n* Better Error and insight\r\n\r\n* Do not use keywords as identifiers\r\n\r\n* add_tests\r\n\r\n* Nasty comma\r\n\r\n* Windows does its own thing\r\n\r\n* Use os.path.join for windows\r\n\r\n* Do not log path\r\n\r\n* Include Insights\r\n\r\n* Full stop.\r\n\r\nCo-authored-by: Darylgolden \r\n\r\n* Full stop to test data.\r\n\r\nCo-authored-by: Darylgolden ", + "code": "def generate_tex_file(expression, environment=None, tex_template=None):\n \n if tex_template is None:\n tex_template = config[\"tex_template\"]\n if environment is not None:\n output = tex_template.get_texcode_for_expression_in_env(expression, environment)\n else:\n output = tex_template.get_texcode_for_expression(expression)\n\n tex_dir = config.get_dir(\"tex_dir\")\n if not os.path.exists(tex_dir):\n os.makedirs(tex_dir)\n\n result = os.path.join(tex_dir, tex_hash(output)) + \".tex\"\n if not os.path.exists(result):\n logger.info(f\"Writing {expression} to %(path)s\", {\"path\": f\"{result}\"})\n with open(result, \"w\", encoding=\"utf-8\") as outfile:\n outfile.write(output)\n return result\n\n", + "url": "https://github.com/ManimCommunity/manim.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 135, + "n_words": 55, + "vocab_size": 41, + "complexity": 5, + "nloc": 16, + "token_counts": 140, + "n_ast_nodes": 245, + "n_identifiers": 23, + "d_id": 46189, + "documentation": { + "docstring": "Takes a tex expression (and an optional tex environment),\n and returns a fully formed tex file ready for compilation.\n\n Parameters\n ----------\n expression : :class:`str`\n String containing the TeX expression to be rendered, e.g. ``\\\\sqrt{2}`` or ``foo``\n environment : Optional[:class:`str`], optional\n The string containing the environment in which the expression should be typeset, e.g. ``align*``\n tex_template : Optional[:class:`~.TexTemplate`], optional\n Template class used to typesetting. 
If not set, use default template set via `config[\"tex_template\"]`\n\n Returns\n -------\n :class:`str`\n Path to generated TeX file\n ", + "n_words": 80, + "vocab_size": 59, + "n_whitespaces": 138, + "language": "en" + } + }, + { + "id": 61320, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/unpacking.py", + "file_name": "unpacking.py", + "fun_name": "has_leading_dir", + "commit_message": "upd; format", + "code": "def has_leading_dir(paths):\n # type: (Iterable[str]) -> bool\n \n common_prefix = None\n for path in paths:\n prefix, rest = split_leading_dir(path)\n if not prefix:\n return False\n elif common_prefix is None:\n common_prefix = prefix\n elif prefix != common_prefix:\n return False\n return True\n\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 114, + "n_words": 38, + "vocab_size": 29, + "complexity": 5, + "nloc": 11, + "token_counts": 45, + "n_ast_nodes": 76, + "n_identifiers": 7, + "d_id": 12506, + "documentation": { + "docstring": "Returns true if all the paths have the same leading path name\n (i.e., everything is in one subdirectory in an archive)", + "n_words": 21, + "vocab_size": 19, + "n_whitespaces": 23, + "language": "en" + } + }, + { + "id": 3876, + "commit_id": "2282a4ae0221b1fb88e16eca8bc14a166998d2d2", + "repo": "airbyte", + "path": "airbyte-integrations/connectors/source-hubspot/unit_tests/test_source.py", + "file_name": "test_source.py", + "fun_name": "test_stream_with_splitting_properties_with_new_record", + "commit_message": "🎉 Source Hubspot: Migrate to CDK (#10177)\n\n* migrate SourceHubspot to cdk\r\n\r\n* refactor discover method\r\n\r\n* change method name\r\n\r\n* deleted Client class\r\n\r\n* remove comment\r\n\r\n* added get_updated_state\r\n\r\n* fix setting initial state\r\n\r\n* fix stream_state dict key\r\n\r\n* fix cursor_field\r\n\r\n* change check test case status\r\n\r\n* refactor streams method\r\n\r\n* remove comment\r\n\r\n* remove TODOs\r\n\r\n* remove comments\r\n\r\n* fix get_updated_state\r\n\r\n* refactor chunk_read\r\n\r\n* override _read_incremental\r\n\r\n* fix unit tests\r\n\r\n* remove comments\r\n\r\n* fix test_check_connection_backoff_on_server_error\r\n\r\n* fix test_check_connection_backoff_on_server_error 2\r\n\r\n* fix test_check_connection_backoff_on_limit_reached\r\n\r\n* fix unit tests\r\n\r\n* clear comments\r\n\r\n* override read method on Source\r\n\r\n* added comments to overriding methods\r\n\r\n* some improvements\r\n\r\n* reafactor overridden _read_incremental\r\n\r\n* format code\r\n\r\n* refactor discovery\r\n\r\n* remove discover\r\n\r\n* format code 2\r\n\r\n* added return types\r\n\r\n* refactor template stream classes\r\n\r\n* remove comments\r\n\r\n* remove _name field\r\n\r\n* rename api.py to streams.py\r\n\r\n* move to HttpStream\r\n\r\n* refactor FormSubmissions\r\n\r\n* refactor Campaings\r\n\r\n* refactor ContactsListMemberships\r\n\r\n* CRMSearchStream refactor\r\n\r\n* CRMSearchStream refactor 2\r\n\r\n* CRMObjectStream refactor\r\n\r\n* DealStageHistoryStream refactor\r\n\r\n* Deals refactor\r\n\r\n* Engagements refactor\r\n\r\n* path method refactor\r\n\r\n* refactor authentication\r\n\r\n* fix check_connection\r\n\r\n* fix call parse_response\r\n\r\n* fix Engagements stream\r\n\r\n* fix CRMSearchStream\r\n\r\n* fix CRMObjectIncremental stream\r\n\r\n* override 
_read_incremental\r\n\r\n* remove commented codes\r\n\r\n* format code\r\n\r\n* update cdk version\r\n\r\n* fix cursor field\r\n\r\n* fix unit tests\r\n\r\n* removed client\r\n\r\n* clear comments\r\n\r\n* clear comments 2\r\n\r\n* clear comments 3\r\n\r\n* clear comments 4\r\n\r\n* override backoff_time\r\n\r\n* remove comment\r\n\r\n* format code\r\n\r\n* backoff_time modified\r\n\r\n* refactor backoff_time\r\n\r\n* format code\r\n\r\n* added return typing\r\n\r\n* format code\r\n\r\n* removed cursor_paths\r\n\r\n* bump version\r\n\r\n* updated spec and def yaml\r\n\r\nCo-authored-by: auganbay ", + "code": "def test_stream_with_splitting_properties_with_new_record(self, requests_mock, common_params, api, fake_properties_list):\n \n\n parsed_properties = list(split_properties(fake_properties_list))\n self.set_mock_properties(requests_mock, \"/properties/v2/deal/properties\", fake_properties_list)\n\n test_stream = Deals(**common_params)\n\n deal_stage_history_response = {\n \"deals\": [\n {\n \"portalId\": 123,\n \"dealId\": 111,\n \"isDeleted\": False,\n \"associations\": None,\n \"properties\": {\n \"dealstage\": {\n \"value\": \"appointmentscheduled\",\n \"timestamp\": 1610533842221,\n \"source\": \"API\",\n \"sourceId\": None,\n \"updatedByUserId\": None,\n \"versions\": [\n {\n \"name\": \"dealstage\",\n \"value\": \"appointmentscheduled\",\n \"timestamp\": 1610533842221,\n \"source\": \"API\",\n \"sourceVid\": [],\n \"requestId\": \"19f07c43-b187-4ab6-9fab-4a0f261f0a8c\",\n }\n ],\n }\n },\n \"stateChanges\": [],\n },\n {\n \"portalId\": 123,\n \"dealId\": 112,\n \"isDeleted\": False,\n \"associations\": None,\n \"properties\": {\n \"dealstage\": {\n \"value\": \"appointmentscheduled\",\n \"timestamp\": 1610533911154,\n \"source\": \"API\",\n \"sourceId\": None,\n \"updatedByUserId\": None,\n \"versions\": [\n {\n \"name\": \"dealstage\",\n \"value\": \"appointmentscheduled\",\n \"timestamp\": 1610533911154,\n \"source\": \"API\",\n \"sourceVid\": [],\n \"requestId\": \"41a1eeff-569b-4193-ba80-238d3bd13f56\",\n }\n ],\n }\n },\n \"stateChanges\": [],\n },\n ]\n }\n\n requests_mock.register_uri(\n \"GET\",\n test_stream._stage_history.path(),\n [\n {\n \"json\": deal_stage_history_response,\n \"status_code\": 200,\n }\n ],\n )\n\n ids_list = [\"6043593519\", \"1092593519\", \"1092593518\", \"1092593517\", \"1092593516\"]\n for property_slice in parsed_properties:\n record_responses = [\n {\n \"json\": {\n \"results\": [\n {**self.BASE_OBJECT_BODY, **{\"id\": id, \"properties\": {p: \"fake_data\" for p in property_slice}}}\n for id in ids_list\n ],\n \"paging\": {},\n },\n \"status_code\": 200,\n }\n ]\n requests_mock.register_uri(\"GET\", f\"{test_stream.url}?properties={','.join(property_slice)}\", record_responses)\n ids_list.append(\"1092593513\")\n\n stream_records = list(test_stream.read_records(sync_mode=SyncMode.incremental))\n\n assert len(stream_records) == 6\n\n\n@pytest.fixture(name=\"configured_catalog\")", + "url": "https://github.com/airbytehq/airbyte.git", + "language": "Python", + "ast_errors": "@pytest.fixture(name=\"configured_catalog\")", + "n_ast_errors": 1, + "ast_levels": 21, + "n_whitespaces": 1988, + "n_words": 177, + "vocab_size": 92, + "complexity": 4, + "nloc": 88, + "token_counts": 357, + "n_ast_nodes": 673, + "n_identifiers": 34, + "d_id": 582, + "documentation": { + "docstring": "\n Check working stream `workflows` with large list of properties using new functionality with splitting properties\n ", + "n_words": 15, + "vocab_size": 13, + "n_whitespaces": 
30, + "language": "en" + } + }, + { + "id": 261691, + "commit_id": "86080bbd5fe9513cd42cf34148ea5907a1a9fc6c", + "repo": "scikit-learn", + "path": "sklearn/utils/tests/test_extmath.py", + "file_name": "test_extmath.py", + "fun_name": "test_cartesian_mix_types", + "commit_message": "ENH cartesian accepts mixed dtypes arrays (#25067)\n\nCo-authored-by: Christian Lorentzen ", + "code": "def test_cartesian_mix_types(arrays, output_dtype):\n \n output = cartesian(arrays)\n\n assert output.dtype == output_dtype\n\n", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 19, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 3, + "token_counts": 20, + "n_ast_nodes": 34, + "n_identifiers": 6, + "d_id": 76936, + "documentation": { + "docstring": "Check that the cartesian product works with mixed types.", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 262448, + "commit_id": "a0a9279e4b8c306875b6437f853bdcc31ee5f1cf", + "repo": "TTS", + "path": "TTS/vocoder/models/gan.py", + "file_name": "gan.py", + "fun_name": "get_optimizer", + "commit_message": "Fix GAN optimizer order\n\ncommit 212d330929c22d0cd970be2023770dc1e39449ab\nAuthor: Edresson Casanova \nDate: Fri Apr 29 16:29:44 2022 -0300\n\n Fix unit test\n\ncommit 44456b0483bf42b1337a8e408ac17af38b26b1fa\nAuthor: Edresson Casanova \nDate: Fri Apr 29 07:28:39 2022 -0300\n\n Fix style\n\ncommit d545beadb932758eb7d1c632778fe317d467a6a4\nAuthor: Edresson Casanova \nDate: Thu Apr 28 17:08:04 2022 -0300\n\n Change order of HIFI-GAN optimizers to be equal than the original repository\n\ncommit 657c5442e5339581e5c09168f5212112a342d97a\nAuthor: Edresson Casanova \nDate: Thu Apr 28 15:40:16 2022 -0300\n\n Remove audio padding before mel spec extraction\n\ncommit 76b274e6901495ffe62ec745fd8ca9fd010f4857\nMerge: 379ccd7b 6233f4fc\nAuthor: Edresson Casanova \nDate: Wed Apr 27 07:28:48 2022 -0300\n\n Merge pull request #1541 from coqui-ai/comp_emb_fix\n\n Bug fix in compute embedding without eval partition\n\ncommit 379ccd7ba6b7e7b550e7d6acf55760c6d0623ba8\nAuthor: WeberJulian \nDate: Wed Apr 27 10:42:26 2022 +0200\n\n returns y_mask in VITS inference (#1540)\n\n * returns y_mask\n\n * make style", + "code": "def get_optimizer(self) -> List:\n \n optimizer1 = get_optimizer(\n self.config.optimizer, self.config.optimizer_params, self.config.lr_gen, self.model_g\n )\n optimizer2 = get_optimizer(\n self.config.optimizer, self.config.optimizer_params, self.config.lr_disc, self.model_d\n )\n return [optimizer2, optimizer1]\n", + "url": "https://github.com/coqui-ai/TTS.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 87, + "n_words": 23, + "vocab_size": 18, + "complexity": 1, + "nloc": 15, + "token_counts": 66, + "n_ast_nodes": 98, + "n_identifiers": 12, + "d_id": 77223, + "documentation": { + "docstring": "Initiate and return the GAN optimizers based on the config parameters.\n\n It returnes 2 optimizers in a list. 
First one is for the generator and the second one is for the discriminator.\n\n Returns:\n List: optimizers.\n ", + "n_words": 35, + "vocab_size": 26, + "n_whitespaces": 67, + "language": "en" + } + }, + { + "id": 120628, + "commit_id": "b64e36b60fca9661ca2c8ae51a56fae07bf5efe6", + "repo": "jax", + "path": "jax/_src/lax/eigh.py", + "file_name": "eigh.py", + "fun_name": "_update_slice", + "commit_message": "Make QDWH-eig implementation jit-table.\n\nMove QDWH-eig from jax._src.scipy.eigh to jax._src.lax.eigh, in preparation for using it to back `lax.eigh` in a future change.\n\nPiperOrigin-RevId: 449362382", + "code": "def _update_slice(operand, update, start_indices, update_dims):\n \n operand_shape = operand.shape\n operand = lax.pad(operand,\n jnp.array(0, operand.dtype),\n [(0, d, 0) for d in update.shape])\n start_indices = tuple(jnp.int32(i) for i in start_indices)\n t = lax.dynamic_slice(operand, start_indices, update.shape)\n t = _mask(update, update_dims, t)\n operand = lax.dynamic_update_slice(operand, t, start_indices)\n return lax.slice(operand, [0] * operand.ndim, operand_shape)\n\n", + "url": "https://github.com/google/jax.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 94, + "n_words": 48, + "vocab_size": 37, + "complexity": 3, + "nloc": 10, + "token_counts": 120, + "n_ast_nodes": 173, + "n_identifiers": 22, + "d_id": 26903, + "documentation": { + "docstring": "\n Similar to lax.dynamic_update_slice, but handles padded updates where padding\n values should not overwrite existing values in the array.\n\n Args:\n operand: the array to update\n update: the padded array to write\n start_indices: the offset at which to write `update`.\n update_dims: the true dimensions of the padded update `update`. 
Only values\n inside the rectangle given by `update_dims` will be overwritten.", + "n_words": 58, + "vocab_size": 41, + "n_whitespaces": 68, + "language": "en" + } + }, + { + "id": 296453, + "commit_id": "23264c8fd4a3f8bcff5961ed11cab6388d3c67a4", + "repo": "core", + "path": "homeassistant/components/roon/config_flow.py", + "file_name": "config_flow.py", + "fun_name": "async_step_link", + "commit_message": "Improve roon integraton (#66000)\n\n* Update to new library, revise discovery to work with new library, specify port to work with new library.\r\n\r\n* Move user gui to fallback.\r\n\r\n* Revise tests.\r\n\r\n* Handle old config.\r\n\r\n* Improve debugging, refresh faster on load.\r\n\r\n* Remove duplicate.\r\n\r\n* Bump library version.\r\n\r\n* Fix docstring per review.\r\n\r\n* Review suggestion\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Review suggestion\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Add check for duplicate host.\r\n\r\n* Add error message to strings.\r\n\r\n* Tidy.\r\n\r\n* Review changes.\r\n\r\n* Remove default.\r\n\r\nCo-authored-by: Martin Hjelmare ", + "code": "async def async_step_link(self, user_input=None):\n \n errors = {}\n if user_input is not None:\n # Do not authenticate if the host is already configured\n self._async_abort_entries_match({CONF_HOST: self._host})\n\n try:\n info = await authenticate(\n self.hass, self._host, self._port, self._servers\n )\n\n except InvalidAuth:\n errors[\"base\"] = \"invalid_auth\"\n except Exception: # pylint: disable=broad-except\n _LOGGER.exception(\"Unexpected exception\")\n errors[\"base\"] = \"unknown\"\n else:\n return self.async_create_entry(title=DEFAULT_NAME, data=info)\n\n return self.async_show_form(step_id=\"link\", errors=errors)\n\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 260, + "n_words": 56, + "vocab_size": 46, + "complexity": 4, + "nloc": 16, + "token_counts": 107, + "n_ast_nodes": 182, + "n_identifiers": 22, + "d_id": 95433, + "documentation": { + "docstring": "Handle linking and authenticting with the roon server.", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 80125, + "commit_id": "ad65741b94f36fbe793cf15f0ab002482070cdb6", + "repo": "wagtail", + "path": "wagtail/tests/streamfield_migrations/test_migrations.py", + "file_name": "test_migrations.py", + "fun_name": "_test_migrate_stream_data", + "commit_message": "Add tests for streamfield migration helpers\n\nCurrently failing due to wagtail-factories being broken on Wagtail 4.1: https://github.com/wagtail/wagtail-factories/issues/65", + "code": "def _test_migrate_stream_data(self):\n \n\n self.apply_migration()\n\n instances = self.model.objects.all().annotate(\n raw_content=Cast(F(\"content\"), JSONField())\n )\n\n for instance in instances:\n prev_content = self.original_raw_data[instance.id]\n self.assertBlocksRenamed(\n old_content=prev_content, new_content=instance.raw_content\n )\n\n # TODO test multiple operations applied in one migration\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 126, + "n_words": 29, + "vocab_size": 26, + "complexity": 2, + "nloc": 10, + "token_counts": 68, + "n_ast_nodes": 113, + "n_identifiers": 19, + "d_id": 17011, + "documentation": { + "docstring": "Test whether the stream data of the model instances have been updated 
properly\n\n Apply the migration and then query the raw data of the updated instances. Compare with\n original raw data and check whether all relevant `char1` blocks have been renamed and\n whether ids and other block types are intact.\n ", + "n_words": 50, + "vocab_size": 34, + "n_whitespaces": 78, + "language": "en" + } + }, + { + "id": 183574, + "commit_id": "7f27e70440c177b2a047b7f74a78ed5cd5b4b596", + "repo": "textual", + "path": "src/textual/_terminal_features.py", + "file_name": "_terminal_features.py", + "fun_name": "synchronized_output_end_sequence", + "commit_message": "[terminal buffering] Address PR feedback", + "code": "def synchronized_output_end_sequence(self) -> str:\n \n if self.synchronised_output:\n return TERMINAL_MODES_ANSI_SEQUENCES[Mode.SynchronizedOutput][\"end_sync\"]\n return \"\"\n", + "url": "https://github.com/Textualize/textual.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 42, + "n_words": 10, + "vocab_size": 9, + "complexity": 2, + "nloc": 13, + "token_counts": 25, + "n_ast_nodes": 45, + "n_identifiers": 7, + "d_id": 44257, + "documentation": { + "docstring": "\n Returns the ANSI sequence that we should send to the terminal to tell it that\n it should stop buffering the content we're about to send.\n If the terminal doesn't seem to support synchronised updates the string will be empty.\n\n Returns:\n str: the \"synchronised output stop\" ANSI sequence. It will be ab empty string\n if the terminal emulator doesn't seem to support the \"synchronised updates\" mode.\n ", + "n_words": 65, + "vocab_size": 41, + "n_whitespaces": 127, + "language": "en" + } + }, + { + "id": 104765, + "commit_id": "445107bae3fcd6ac9eeae503232960fa4ba8ccfd", + "repo": "datasets", + "path": "src/datasets/arrow_dataset.py", + "file_name": "arrow_dataset.py", + "fun_name": "num_rows", + "commit_message": "Add code examples to API docs (#4168)\n\n* add code examples for functions related to the base dataset class\r\n\r\n* ✨ make style\r\n\r\n* 🖍 make each code example fully reproducible where applicable\r\n\r\n* 🖍 show parameter usage for some functions\r\n\r\n* 🖍 add examples for DatasetInfo functions", + "code": "def num_rows(self) -> int:\n \n if self._indices is not None:\n return self._indices.num_rows\n return self._data.num_rows\n", + "url": "https://github.com/huggingface/datasets.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 45, + "n_words": 13, + "vocab_size": 12, + "complexity": 2, + "nloc": 15, + "token_counts": 28, + "n_ast_nodes": 46, + "n_identifiers": 5, + "d_id": 21960, + "documentation": { + "docstring": "Number of rows in the dataset (same as :meth:`Dataset.__len__`).\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\", split=\"validation\")\n >>> ds.num_rows\n 1066\n ```\n ", + "n_words": 25, + "vocab_size": 23, + "n_whitespaces": 81, + "language": "en" + } + }, + { + "id": 314397, + "commit_id": "10dc38e0ec27f7bef990ee431459342f9c3c52b4", + "repo": "core", + "path": "homeassistant/components/acmeda/cover.py", + "file_name": "cover.py", + "fun_name": "current_cover_position", + "commit_message": "Adjust CoverEntity property type hints in components (#73943)\n\n* Adjust CoverEntity property type hints in components\r\n\r\n* Revert changes to rflink\r\n\r\n* Revert changes to wilight", + "code": "def current_cover_position(self) -> int | None:\n \n position = None\n if self.roller.type != 
7:\n position = 100 - self.roller.closed_percent\n return position\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 59, + "n_words": 20, + "vocab_size": 17, + "complexity": 2, + "nloc": 9, + "token_counts": 33, + "n_ast_nodes": 55, + "n_identifiers": 7, + "d_id": 113004, + "documentation": { + "docstring": "Return the current position of the roller blind.\n\n None is unknown, 0 is closed, 100 is fully open.\n ", + "n_words": 18, + "vocab_size": 15, + "n_whitespaces": 32, + "language": "en" + } + }, + { + "id": 167449, + "commit_id": "e48c9c3973286e257f6da1966c91806d86b917e0", + "repo": "pandas", + "path": "pandas/io/parsers/readers.py", + "file_name": "readers.py", + "fun_name": "TextParser", + "commit_message": "TYP: more return annotations for io/* (#47524)\n\n* TYP: more return annotations for io/*\r\n\r\n* import future", + "code": "def TextParser(*args, **kwds) -> TextFileReader:\n \n kwds[\"engine\"] = \"python\"\n return TextFileReader(*args, **kwds)\n\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 20, + "n_words": 11, + "vocab_size": 10, + "complexity": 1, + "nloc": 58, + "token_counts": 27, + "n_ast_nodes": 49, + "n_identifiers": 4, + "d_id": 40021, + "documentation": { + "docstring": "\n Converts lists of lists/tuples into DataFrames with proper type inference\n and optional (e.g. string to datetime) conversion. Also enables iterating\n lazily over chunks of large files\n\n Parameters\n ----------\n data : file-like object or list\n delimiter : separator character to use\n dialect : str or csv.Dialect instance, optional\n Ignored if delimiter is longer than 1 character\n names : sequence, default\n header : int, default 0\n Row to use to parse column labels. Defaults to the first row. Prior\n rows will be discarded\n index_col : int or list, optional\n Column or columns to use as the (possibly hierarchical) index\n has_index_names: bool, default False\n True if the cols defined in index_col have an index name and are\n not in the header.\n na_values : scalar, str, list-like, or dict, optional\n Additional strings to recognize as NA/NaN.\n keep_default_na : bool, default True\n thousands : str, optional\n Thousands separator\n comment : str, optional\n Comment out remainder of line\n parse_dates : bool, default False\n keep_date_col : bool, default False\n date_parser : function, optional\n skiprows : list of integers\n Row numbers to skip\n skipfooter : int\n Number of line at bottom of file to skip\n converters : dict, optional\n Dict of functions for converting values in certain columns. Keys can\n either be integers or column labels, values are functions that take one\n input argument, the cell (not column) content, and return the\n transformed content.\n encoding : str, optional\n Encoding to use for UTF when reading/writing (ex. 'utf-8')\n squeeze : bool, default False\n returns Series if only one column.\n infer_datetime_format: bool, default False\n If True and `parse_dates` is True for a column, try to infer the\n datetime format based on the first datetime string. If the format\n can be inferred, there often will be a large parsing speed-up.\n float_precision : str, optional\n Specifies which converter the C engine should use for floating-point\n values. 
The options are `None` or `high` for the ordinary converter,\n `legacy` for the original lower precision pandas converter, and\n `round_trip` for the round-trip converter.\n\n .. versionchanged:: 1.2\n ", + "n_words": 331, + "vocab_size": 197, + "n_whitespaces": 588, + "language": "en" + } + }, + { + "id": 204805, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/db/backends/base/base.py", + "file_name": "base.py", + "fun_name": "_nodb_cursor", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def _nodb_cursor(self):\n \n conn = self.__class__({**self.settings_dict, \"NAME\": None}, alias=NO_DB_ALIAS)\n try:\n with conn.cursor() as cursor:\n yield cursor\n finally:\n conn.close()\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 82, + "n_words": 17, + "vocab_size": 17, + "complexity": 2, + "nloc": 7, + "token_counts": 47, + "n_ast_nodes": 85, + "n_identifiers": 9, + "d_id": 50892, + "documentation": { + "docstring": "\n Return a cursor from an alternative connection to be used when there is\n no need to access the main database, specifically for test db\n creation/deletion. This also prevents the production database from\n being exposed to potential child threads while (or after) the test\n database is destroyed. Refs #10868, #17786, #16969.\n ", + "n_words": 50, + "vocab_size": 42, + "n_whitespaces": 93, + "language": "en" + } + }, + { + "id": 131451, + "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", + "repo": "ray", + "path": "python/ray/tests/test_command_runner.py", + "file_name": "test_command_runner.py", + "fun_name": "test_command_runner_interface_abstraction_violation", + "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", + "code": "def test_command_runner_interface_abstraction_violation():\n \n\n cmd_runner_interface_public_functions = dir(CommandRunnerInterface)\n allowed_public_interface_functions = {\n func\n for func in cmd_runner_interface_public_functions\n if not func.startswith(\"_\")\n }\n for subcls in [SSHCommandRunner, DockerCommandRunner, KubernetesCommandRunner]:\n subclass_available_functions = dir(subcls)\n subclass_public_functions = {\n func for func in subclass_available_functions if not func.startswith(\"_\")\n }\n assert allowed_public_interface_functions == subclass_public_functions\n\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 117, + "n_words": 42, + "vocab_size": 23, + "complexity": 6, + "nloc": 13, + "token_counts": 66, + "n_ast_nodes": 108, + "n_identifiers": 13, + "d_id": 29533, + "documentation": { + "docstring": "Enforces the CommandRunnerInterface functions on the subclasses.\n\n This is important to make sure the subclasses do not violate the\n function abstractions. 
If you need to add a new function to one of\n the CommandRunnerInterface subclasses, you have to add it to\n CommandRunnerInterface and all of its subclasses.\n ", + "n_words": 47, + "vocab_size": 32, + "n_whitespaces": 62, + "language": "en" + } + }, + { + "id": 96190, + "commit_id": "cf30c11a194aa5e61d8d7c7fc506764f846fcf82", + "repo": "sentry", + "path": "src/sentry/search/events/builder.py", + "file_name": "builder.py", + "fun_name": "resolve_granularity", + "commit_message": "feat(MEP): Add initial framework for metric queries (#31649)\n\n- This adds a MetricsQueryBuilder, which works very similarily to our\r\n QueryBuilder, but with specific handlers for how metrics construct\r\n queries\r\n- This MetricsQueryBuilder does not yet construct snql queries, and will\r\n not because table queries will require multiple queries to construct\r\n similar table data\r\n - that is, if we want [transaction, p95, count_unique(user)], we need\r\n a query against distributions with [transaction, p95] followed by a\r\n second query for [transaction, count_unique(user)] against the sets\r\n table\r\n - This is so we can maintain a sortby", + "code": "def resolve_granularity(self) -> Granularity:\n \n start = cast(datetime, self.params[\"start\"])\n end = cast(datetime, self.params[\"end\"])\n duration = (end - start).seconds\n\n # TODO: could probably allow some leeway on the start & end (a few minutes) and use a bigger granularity\n # eg. yesterday at 11:59pm to tomorrow at 12:01am could still use the day bucket\n\n # Query is at least an hour\n if start.minute == end.minute == 0 and duration % 3600 == 0:\n # we're going from midnight -> midnight which aligns with our daily buckets\n if start.hour == end.hour == 0 and duration % 86400 == 0:\n granularity = 86400\n # we're roughly going from start of hour -> next which aligns with our hourly buckets\n else:\n granularity = 3600\n # We're going from one random minute to another, we could use the 10s bucket, but no reason for that precision\n # here\n else:\n granularity = 60\n return Granularity(granularity)\n", + "url": "https://github.com/getsentry/sentry.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 317, + "n_words": 148, + "vocab_size": 91, + "complexity": 5, + "nloc": 20, + "token_counts": 95, + "n_ast_nodes": 160, + "n_identifiers": 13, + "d_id": 19289, + "documentation": { + "docstring": "Granularity impacts metric queries even when they aren't timeseries because the data needs to be\n pre-aggregated\n\n Granularity is determined by checking the alignment of our start & end timestamps with the timestamps in\n snuba. eg. 
we can only use the daily granularity if the query starts and ends at midnight\n Seconds are ignored under the assumption that there currently isn't a valid use case to have\n to-the-second accurate information\n ", + "n_words": 69, + "vocab_size": 60, + "n_whitespaces": 111, + "language": "en" + } + }, + { + "id": 125751, + "commit_id": "5030a4c1d384e4bb1a25169384d7465e718e99a5", + "repo": "ray", + "path": "rllib/connectors/tests/test_agent.py", + "file_name": "test_agent.py", + "fun_name": "test_vr_connector_respects_training_or_inference_vr_flags", + "commit_message": "[RLlib] Simplify agent collector (#26803)", + "code": "def test_vr_connector_respects_training_or_inference_vr_flags(self):\n \n view_rq_dict = {\n \"both\": ViewRequirement(\n \"obs\", used_for_training=True, used_for_compute_actions=True\n ),\n \"only_inference\": ViewRequirement(\n \"obs\", used_for_training=False, used_for_compute_actions=True\n ),\n \"none\": ViewRequirement(\n \"obs\", used_for_training=False, used_for_compute_actions=False\n ),\n \"only_training\": ViewRequirement(\n \"obs\", used_for_training=True, used_for_compute_actions=False\n ),\n }\n\n obs_arr = np.array([0, 1, 2, 3])\n agent_data = dict(obs=obs_arr)\n data = AgentConnectorDataType(0, 1, agent_data)\n\n ctx = ConnectorContext(view_requirements=view_rq_dict)\n\n # TODO @jun What is the expected behavior of this test?\n for_action_expected_list = [\n # is_training = False\n SampleBatch({\"both\": obs_arr, \"only_inference\": obs_arr}),\n # is_training = True\n SampleBatch({\"both\": obs_arr, \"only_inference\": obs_arr}),\n ]\n\n for_training_expected_list = [\n # is_training = False\n None,\n # is_training = True\n agent_data,\n ]\n\n for is_training in [True, False]:\n c = ViewRequirementAgentConnector(ctx)\n c.is_training(is_training)\n processed = c([data])\n\n for_training = processed[0].data.for_training\n for_training_expected = for_training_expected_list[is_training]\n for_action = processed[0].data.for_action\n for_action_expected = for_action_expected_list[is_training]\n\n print(\"-\" * 30)\n print(f\"is_training = {is_training}\")\n print(\"for action:\")\n print(for_action)\n print(\"for training:\")\n print(for_training)\n\n # TODO @jun is for_training expected to always be equal to data?\n check(for_training, for_training_expected)\n check(for_action, for_action_expected)\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 647, + "n_words": 144, + "vocab_size": 88, + "complexity": 2, + "nloc": 43, + "token_counts": 250, + "n_ast_nodes": 414, + "n_identifiers": 30, + "d_id": 27968, + "documentation": { + "docstring": "Tests that the connector respects the flags within view_requirements (i.e.\n used_for_training, used_for_compute_actions) under different is_training modes.\n For inference,\n the returned data should be state -> obs\n For training,\n the returned data should be the data itself. 
The higher level policy\n collector in env_runner will construct the proper data structure.\n ", + "n_words": 49, + "vocab_size": 37, + "n_whitespaces": 110, + "language": "en" + } + }, + { + "id": 204647, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/core/management/commands/loaddata.py", + "file_name": "loaddata.py", + "fun_name": "fixture_dirs", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def fixture_dirs(self):\n \n dirs = []\n fixture_dirs = settings.FIXTURE_DIRS\n if len(fixture_dirs) != len(set(fixture_dirs)):\n raise ImproperlyConfigured(\"settings.FIXTURE_DIRS contains duplicates.\")\n for app_config in apps.get_app_configs():\n app_label = app_config.label\n app_dir = os.path.join(app_config.path, \"fixtures\")\n if app_dir in fixture_dirs:\n raise ImproperlyConfigured(\n \"'%s' is a default fixture directory for the '%s' app \"\n \"and cannot be listed in settings.FIXTURE_DIRS.\"\n % (app_dir, app_label)\n )\n\n if self.app_label and app_label != self.app_label:\n continue\n if os.path.isdir(app_dir):\n dirs.append(app_dir)\n dirs.extend(fixture_dirs)\n dirs.append(\"\")\n return [os.path.realpath(d) for d in dirs]\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 311, + "n_words": 72, + "vocab_size": 57, + "complexity": 8, + "nloc": 21, + "token_counts": 134, + "n_ast_nodes": 225, + "n_identifiers": 22, + "d_id": 50825, + "documentation": { + "docstring": "\n Return a list of fixture directories.\n\n The list contains the 'fixtures' subdirectory of each installed\n application, if it exists, the directories in FIXTURE_DIRS, and the\n current directory.\n ", + "n_words": 27, + "vocab_size": 23, + "n_whitespaces": 63, + "language": "en" + } + }, + { + "id": 159323, + "commit_id": "6339856514897056716bb531acb8489c9cf05d26", + "repo": "rasa", + "path": "rasa/shared/importers/importer.py", + "file_name": "importer.py", + "fun_name": "get_config_file_for_auto_config", + "commit_message": "Add support for different recipes (#10641)\n\n* Add support for different recipes\r\n\r\nFixes https://github.com/RasaHQ/rasa/issues/10473\r\n\r\n* Update docs/docs/graph-recipe.mdx\r\n\r\nCo-authored-by: Joe Juzl ", + "code": "def get_config_file_for_auto_config(self) -> Optional[Text]:\n \n return self.config_file\n", + "url": "https://github.com/RasaHQ/rasa.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 6, + "n_whitespaces": 20, + "n_words": 6, + "vocab_size": 6, + "complexity": 1, + "nloc": 3, + "token_counts": 15, + "n_ast_nodes": 26, + "n_identifiers": 5, + "d_id": 38195, + "documentation": { + "docstring": "Returns config file path for auto-config only if there is a single one.", + "n_words": 13, + "vocab_size": 13, + "n_whitespaces": 12, + "language": "en" + } + }, + { + "id": 22626, + "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", + "repo": "Python", + "path": "ftp_send_receive.py", + "file_name": "ftp_send_receive.py", + "fun_name": "receive_file", + "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", + "code": "def receive_file(filename=\"example.txt\"):\n with open(filename, \"wb\") as out_file:\n ftp.retrbinary(\"RETR \" + filename, out_file.write, 1024)\n ftp.quit()\n\n\n\n\n", + "url": "https://github.com/geekcomputers/Python.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + 
"n_whitespaces": 30, + "n_words": 14, + "vocab_size": 14, + "complexity": 1, + "nloc": 4, + "token_counts": 36, + "n_ast_nodes": 69, + "n_identifiers": 8, + "d_id": 4380, + "documentation": { + "docstring": "\n\tThe file which will be sent via the FTP server\n\tThe file send will be send to the current working directory\n", + "n_words": 21, + "vocab_size": 15, + "n_whitespaces": 19, + "language": "en" + } + }, + { + "id": 203545, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/contrib/admin/views/autocomplete.py", + "file_name": "autocomplete.py", + "fun_name": "get_queryset", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def get_queryset(self):\n \n qs = self.model_admin.get_queryset(self.request)\n qs = qs.complex_filter(self.source_field.get_limit_choices_to())\n qs, search_use_distinct = self.model_admin.get_search_results(\n self.request, qs, self.term\n )\n if search_use_distinct:\n qs = qs.distinct()\n return qs\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 94, + "n_words": 23, + "vocab_size": 16, + "complexity": 2, + "nloc": 9, + "token_counts": 64, + "n_ast_nodes": 104, + "n_identifiers": 12, + "d_id": 50441, + "documentation": { + "docstring": "Return queryset based on ModelAdmin.get_search_results().", + "n_words": 5, + "vocab_size": 5, + "n_whitespaces": 4, + "language": "en" + } + }, + { + "id": 315483, + "commit_id": "b09aaba421d6d6178d582bef9ea363017e55639d", + "repo": "core", + "path": "tests/components/mikrotik/test_device_tracker.py", + "file_name": "test_device_tracker.py", + "fun_name": "test_hub_not_support_wireless", + "commit_message": "Add type hints and code cleanup for mikrotik (#74296)\n\n* Add type hints and code cleanup for mikrotik\r\n\r\n* update test and increase coverage\r\n\r\n* move setup_mikrotik_entry to __init__.py", + "code": "async def test_hub_not_support_wireless(hass, mock_device_registry_devices):\n \n\n await setup_mikrotik_entry(hass, support_wireless=False)\n device_1 = hass.states.get(\"device_tracker.device_1\")\n assert device_1\n assert device_1.state == \"home\"\n # device_2 is added from DHCP\n device_2 = hass.states.get(\"device_tracker.device_2\")\n assert device_2\n assert device_2.state == \"home\"\n\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 58, + "n_words": 31, + "vocab_size": 22, + "complexity": 1, + "nloc": 8, + "token_counts": 53, + "n_ast_nodes": 95, + "n_identifiers": 10, + "d_id": 114071, + "documentation": { + "docstring": "Test device_trackers created when hub doesn't support wireless.", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 156510, + "commit_id": "1e783d9a714160e968936cb22d54d085959ab09e", + "repo": "dask", + "path": "dask/typing.py", + "file_name": "typing.py", + "fun_name": "__dask_postcompute__", + "commit_message": "Collection Protocol (#8674)\n\n[PEP 544](https://www.python.org/dev/peps/pep-0544/) introduces the `Protocol` class to the `typing` module in Python 3.8 (the soon be the minimum supported version, https://github.com/dask/community/issues/213). Writing new Dask collections for [dask-awkward](https://github.com/ContinuumIO/dask-awkward/) has had me thinking about working on a `DaskCollection` protocol. 
I imagine the benefits to be:\r\n\r\n- usage with static type checkers\r\n - other activity in this area at\r\n - #8295 \r\n - #8706 \r\n - #8854\r\n - Python supporting IDEs take advantage of typing\r\n- self-documenting; some improvements to [the custom collections page](https://docs.dask.org/en/latest/custom-collections.html) of the docs. The protocol docs can be autogenerated and added to that page.\r\n- purely opt-in feature\r\n\r\nThe `typing.runtime_checkable` decorator allows use of `isinstance(x, DaskCollection)` in any code base\r\nthat uses Dask collections; for example:\r\n\r\n```python\r\n>>> from dask.typing import DaskCollection\r\n>>> import dask.array as da\r\n>>> x = da.zeros((10, 3))\r\n>>> isinstance(x, DaskCollection)\r\nTrue\r\n```\r\n(though this is an order of magnitude slower than `dask.base.is_dask_collection` which only checks for `x.__dask_graph__() is not None`; static typing checking & built-in interface documentation are the core benefits IMO)\r\n\r\nSomething else that came up in the brief discussion on a call last week was having `{Scheduler,Worker,Nanny}Plugin` protocols in `distributed`; and perhaps those are better places to start introducing protocols to Dask since on the user side typically more folks would write plugins than new collections.", + "code": "def __dask_postcompute__(self) -> tuple[PostComputeCallable, tuple]:\n \n raise NotImplementedError(\"Inheriting class must implement this method.\")\n", + "url": "https://github.com/dask/dask.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 26, + "n_words": 12, + "vocab_size": 12, + "complexity": 1, + "nloc": 23, + "token_counts": 18, + "n_ast_nodes": 32, + "n_identifiers": 5, + "d_id": 36663, + "documentation": { + "docstring": "Finalizer function and optional arguments to construct final result.\n\n Upon computation each key in the collection will have an in\n memory result, the postcompute function combines each key's\n result into a final in memory representation. For example,\n dask.array.Array concatenates the arrays at each chunk into a\n final in-memory array.\n\n Returns\n -------\n PostComputeCallable\n Callable that recieves the sequence of the results of each\n final key along with optional arguments. An example signature\n would be ``finalize(results: Sequence[Any], *args)``.\n tuple[Any, ...]\n Optional arguments passed to the function following the\n key results (the `*args` part of the\n ``PostComputeCallable``. 
If no additional arguments are to\n be passed then this must be an empty tuple.\n\n ", + "n_words": 109, + "vocab_size": 75, + "n_whitespaces": 256, + "language": "en" + } + }, + { + "id": 31114, + "commit_id": "34097b3304d79ace845316d4929220623279c8bc", + "repo": "transformers", + "path": "src/transformers/testing_utils.py", + "file_name": "testing_utils.py", + "fun_name": "require_intel_extension_for_pytorch", + "commit_message": "Extend Transformers Trainer Class to Enable CPU AMP and Integrate Intel Extension for PyTorch (#17138)\n\n* init PR\r\n\r\n* fix import ipex\r\n\r\n* minor fix on bf16\r\n\r\n* refine optimizer\r\n\r\n* refine args notes\r\n\r\n* refine code\r\n\r\n* refine ipex optimize args\r\n\r\n* refine half_precision_backend\r\n\r\n* black format\r\n\r\n* isort format\r\n\r\n* isort format files\r\n\r\n* flake8 format\r\n\r\n* doc builder format\r\n\r\n* refine codes\r\n\r\n* remove jit and optim bits\r\n\r\n* black preview format\r\n\r\n* Update src/transformers/trainer.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* refine code\r\n\r\n* refine notes\r\n\r\n* Update src/transformers/trainer.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/trainer.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* code refine\r\n\r\n* add ipex ut\r\n\r\n* add performance cpu doc\r\n\r\n* link to the cpu doc from main perf doc\r\n\r\n* install ipex into CI's docker\r\n\r\n* Update perf_train_cpu.mdx\r\n\r\n* Update docs/source/en/perf_train_cpu.mdx\r\n\r\nCo-authored-by: Stas Bekman \r\n\r\n* Update perf_train_cpu.mdx\r\n\r\n* Update perf_train_cpu.mdx\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Stas Bekman \r\nCo-authored-by: Stas Bekman ", + "code": "def require_intel_extension_for_pytorch(test_case):\n \n return unittest.skipUnless(is_ipex_available(), \"test requires Intel Extension for PyTorch\")(test_case)\n\n", + "url": "https://github.com/huggingface/transformers.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 16, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 2, + "token_counts": 20, + "n_ast_nodes": 37, + "n_identifiers": 5, + "d_id": 5681, + "documentation": { + "docstring": "\n Decorator marking a test that requires Intel Extension for PyTorch.\n\n These tests are skipped when Intel Extension for PyTorch isn't installed.\n\n ", + "n_words": 21, + "vocab_size": 18, + "n_whitespaces": 31, + "language": "en" + } + }, + { + "id": 103383, + "commit_id": "45bbe17559a541289699643ef0b541a2138a09d6", + "repo": "kitty", + "path": "kitty/cli.py", + "file_name": "cli.py", + "fun_name": "options_spec", + "commit_message": "Docs: Minor improvements to the kitty cli help documentation\n\nAdd some text roles.\nUse `kitty --hold`.\nUse `appname` and `conf_name`.\n`appname` is also applied to the system-wide configuration path.", + "code": "def options_spec() -> str:\n if not hasattr(options_spec, 'ans'):\n OPTIONS = \n setattr(options_spec, 'ans', OPTIONS.format(\n appname=appname, conf_name=appname,\n config_help=CONFIG_HELP.format(appname=appname, conf_name=appname),\n ))\n ans: str = getattr(options_spec, 'ans')\n return ans\n\n", + "url": "https://github.com/kovidgoyal/kitty.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 77, + "n_words": 
25, + "vocab_size": 24, + "complexity": 2, + "nloc": 166, + "token_counts": 65, + "n_ast_nodes": 105, + "n_identifiers": 12, + "d_id": 21638, + "documentation": { + "docstring": "\n--class\ndest=cls\ndefault={appname}\ncondition=not is_macos\nSet the class part of the :italic:`WM_CLASS` window property. On Wayland, it\nsets the app id.\n\n\n--name\ncondition=not is_macos\nSet the name part of the :italic:`WM_CLASS` property. Defaults to using the\nvalue from :option:`{appname} --class`.\n\n\n--title -T\nSet the OS window title. This will override any title set by the program running\ninside kitty, permanently fixing the OS window's title. So only use this if you\nare running a program that does not set titles.\n\n\n--config -c\ntype=list\n{config_help}\n\n\n--override -o\ntype=list\nOverride individual configuration options, can be specified multiple times.\nSyntax: :italic:`name=value`. For example: :option:`{appname} -o` font_size=20\n\n\n--directory --working-directory -d\ndefault=.\nChange to the specified directory when launching.\n\n\n--detach\ntype=bool-set\ncondition=not is_macos\nDetach from the controlling terminal, if any.\n\n\n--session\nPath to a file containing the startup :italic:`session` (tabs, windows, layout,\nprograms). Use - to read from STDIN. See the :file:`README` file for details and\nan example.\n\n\n--hold\ntype=bool-set\nRemain open after child process exits. Note that this only affects the first\nwindow. You can quit by either using the close window shortcut or pressing any\nkey.\n\n\n--single-instance -1\ntype=bool-set\nIf specified only a single instance of :italic:`{appname}` will run. New\ninvocations will instead create a new top-level window in the existing\n:italic:`{appname}` instance. This allows :italic:`{appname}` to share a single\nsprite cache on the GPU and also reduces startup time. You can also have\nseparate groups of :italic:`{appname}` instances by using the :option:`{appname}\n--instance-group` option.\n\n\n--instance-group\nUsed in combination with the :option:`{appname} --single-instance` option. All\n:italic:`{appname}` invocations with the same :option:`{appname}\n--instance-group` will result in new windows being created in the first\n:italic:`{appname}` instance within that group.\n\n\n--wait-for-single-instance-window-close\ntype=bool-set\nNormally, when using :option:`{appname} --single-instance`, :italic:`{appname}`\nwill open a new window in an existing instance and quit immediately. With this\noption, it will not quit till the newly opened window is closed. Note that if no\nprevious instance is found, then :italic:`{appname}` will wait anyway,\nregardless of this option.\n\n\n--listen-on\nListen on the specified socket address for control messages. For example,\n:option:`{appname} --listen-on`=unix:/tmp/mykitty or\n:option:`{appname} --listen-on`=tcp:localhost:12345. On Linux systems, you can\nalso use abstract UNIX sockets, not associated with a file, like this:\n:option:`{appname} --listen-on`=unix:@mykitty. Environment variables are\nexpanded and relative paths are resolved with respect to the temporary\ndirectory. To control kitty, you can send commands to it with\n:italic:`{appname} @` using the :option:`{appname} @ --to` option to specify\nthis address. Unless you enabled :opt:`allow_remote_control` in\n:file:`{conf_name}.conf`, this option will be ignored. 
Note that if you run\n:italic:`{appname} @` within a kitty window, there is\nno need to specify the :option:`{appname} @ --to` option as it will\nautomatically read from the environment. For UNIX sockets, this can also be\nspecified in :file:`{conf_name}.conf`.\n\n\n--start-as\ntype=choices\ndefault=normal\nchoices=normal,fullscreen,maximized,minimized\nControl how the initial kitty window is created.\n\n\n# Debugging options\n\n--version -v\ntype=bool-set\nThe current {appname} version.\n\n\n--dump-commands\ntype=bool-set\nOutput commands received from child process to STDOUT.\n\n\n--replay-commands\nReplay previously dumped commands. Specify the path to a dump file previously\ncreated by :option:`{appname} --dump-commands`. You\ncan open a new kitty window to replay the commands with::\n\n {appname} --hold {appname} --replay-commands /path/to/dump/file\n\n\n--dump-bytes\nPath to file in which to store the raw bytes received from the child process.\n\n\n--debug-rendering --debug-gl\ntype=bool-set\nDebug rendering commands. This will cause all OpenGL calls to check for errors\ninstead of ignoring them. Also prints out miscellaneous debug information.\nUseful when debugging rendering problems.\n\n\n--debug-input --debug-keyboard\ndest=debug_keyboard\ntype=bool-set\nPrint out key and mouse events as they are received.\n\n\n--debug-font-fallback\ntype=bool-set\nPrint out information about the selection of fallback fonts for characters not\npresent in the main font.\n\n\n--watcher\nThis option is deprecated in favor of the :opt:`watcher` option in\n:file:`{conf_name}.conf` and should not be used.\n\n\n--execute -e\ntype=bool-set\n!\n", + "n_words": 617, + "vocab_size": 336, + "n_whitespaces": 511, + "language": "en" + } + }, + { + "id": 66656, + "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", + "repo": "erpnext", + "path": "erpnext/patches/v12_0/repost_stock_ledger_entries_for_target_warehouse.py", + "file_name": "repost_stock_ledger_entries_for_target_warehouse.py", + "fun_name": "execute", + "commit_message": "style: format code with black", + "code": "def execute():\n\twarehouse_perm = frappe.get_all(\n\t\t\"User Permission\",\n\t\tfields=[\"count(*) as p_count\", \"is_default\", \"user\"],\n\t\tfilters={\"allow\": \"Warehouse\"},\n\t\tgroup_by=\"user\",\n\t)\n\n\tif not warehouse_perm:\n\t\treturn\n\n\texecute_patch = False\n\tfor perm_data in warehouse_perm:\n\t\tif perm_data.p_count == 1 or (\n\t\t\tperm_data.p_count > 1\n\t\t\tand frappe.get_all(\n\t\t\t\t\"User Permission\",\n\t\t\t\tfilters={\"user\": perm_data.user, \"allow\": \"warehouse\", \"is_default\": 1},\n\t\t\t\tlimit=1,\n\t\t\t)\n\t\t):\n\t\t\texecute_patch = True\n\t\t\tbreak\n\n\tif not execute_patch:\n\t\treturn\n\n\tfor doctype in [\"Sales Invoice\", \"Delivery Note\"]:\n\t\tif not frappe.get_meta(doctype + \" Item\").get_field(\"target_warehouse\").hidden:\n\t\t\tcontinue\n\n\t\tcond = \"\"\n\t\tif doctype == \"Sales Invoice\":\n\t\t\tcond = \" AND parent_doc.update_stock = 1\"\n\n\t\tdata = frappe.db.sql(\n\t\t\t.format(\n\t\t\t\tdoctype=doctype, cond=cond\n\t\t\t),\n\t\t\tas_dict=1,\n\t\t)\n\n\t\tif data:\n\t\t\tnames = [d.child_name for d in data]\n\t\t\tfrappe.db.sql(\n\t\t\t\t.format(\n\t\t\t\t\tdoctype, \",\".join([\"%s\"] * len(names))\n\t\t\t\t),\n\t\t\t\ttuple(names),\n\t\t\t)\n\n\t\t\tfrappe.db.sql(\n\t\t\t\t.format(\n\t\t\t\t\tdoctype, \",\".join([\"%s\"] * len(names))\n\t\t\t\t),\n\t\t\t\ttuple(names),\n\t\t\t)\n\n\t\t\tparent_names = list(set([d.name for d in data]))\n\n\t\t\tfor d in 
parent_names:\n\t\t\t\tdoc = frappe.get_doc(doctype, d)\n\t\t\t\tif doc.docstatus != 1:\n\t\t\t\t\tcontinue\n\n\t\t\t\tdoc.docstatus = 2\n\t\t\t\tdoc.update_stock_ledger()\n\t\t\t\tdoc.make_gl_entries_on_cancel(repost_future_gle=False)\n\n\t\t\t\t# update stock & gl entries for submit state of PR\n\t\t\t\tdoc.docstatus = 1\n\t\t\t\tdoc.update_stock_ledger()\n\t\t\t\tdoc.make_gl_entries()\n\n\tif frappe.get_meta(\"Sales Order Item\").get_field(\"target_warehouse\").hidden:\n\t\tfrappe.db.sql(\n\t\t\t\n\t\t)\n\n\t\tfrappe.db.sql(\n\t\t\t\n\t\t)\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 18, + "n_whitespaces": 105, + "n_words": 171, + "vocab_size": 103, + "complexity": 16, + "nloc": 79, + "token_counts": 351, + "n_ast_nodes": 599, + "n_identifiers": 39, + "d_id": 14274, + "documentation": { + "docstring": " SELECT parent_doc.name as name, child_doc.name as child_name\n\t\t\tFROM\n\t\t\t\t`tab{doctype}` parent_doc, `tab{doctype} Item` child_doc\n\t\t\tWHERE\n\t\t\t\tparent_doc.name = child_doc.parent AND parent_doc.docstatus < 2\n\t\t\t\tAND child_doc.target_warehouse is not null AND child_doc.target_warehouse != ''\n\t\t\t\tAND child_doc.creation > '2020-04-16' {cond}\n\t\t UPDATE `tab{0} Item` set target_warehouse = null\n\t\t\t\tWHERE name in ({1}) UPDATE `tabPacked Item` set target_warehouse = null\n\t\t\t\tWHERE parenttype = '{0}' and parent_detail_docname in ({1})\n\t\t\t UPDATE `tabSales Order Item` set target_warehouse = null\n\t\t\tWHERE creation > '2020-04-16' and docstatus < 2 UPDATE `tabPacked Item` set target_warehouse = null\n\t\t\tWHERE creation > '2020-04-16' and docstatus < 2 and parenttype = 'Sales Order' ", + "n_words": 97, + "vocab_size": 47, + "n_whitespaces": 90, + "language": "en" + } + }, + { + "id": 19478, + "commit_id": "2669b4ce0696de02610cbea1b7547d53cead85bb", + "repo": "pipenv", + "path": "pipenv/vendor/requirementslib/models/setup_info.py", + "file_name": "setup_info.py", + "fun_name": "build_wheel", + "commit_message": "patch newly occuring test failure where the base_dir does not contain the subdirectory.", + "code": "def build_wheel(self):\n # type: () -> S\n need_delete = False\n if not self.pyproject.exists():\n if not self.build_requires:\n build_requires = '\"setuptools\", \"wheel\"'\n else:\n build_requires = \", \".join(\n ['\"{0}\"'.format(r) for r in self.build_requires]\n )\n self.pyproject.write_text(\n str(\n .format(\n build_requires, self.build_backend\n ).strip()\n )\n )\n need_delete = True\n\n parsed = urlparse(str(self.ireq.link))\n subdir = parse_qs(parsed.fragment).get('subdirectory', [])\n if subdir:\n directory = f\"{self.base_dir}/{subdir[0]}\"\n else:\n directory = self.base_dir\n result = build_pep517(\n directory,\n self.extra_kwargs[\"build_dir\"],\n config_settings=self.pep517_config,\n dist_type=\"wheel\",\n )\n if need_delete:\n self.pyproject.unlink()\n return result\n\n # noinspection PyPackageRequirements", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 440, + "n_words": 74, + "vocab_size": 53, + "complexity": 6, + "nloc": 36, + "token_counts": 156, + "n_ast_nodes": 279, + "n_identifiers": 30, + "d_id": 2994, + "documentation": { + "docstring": "\n[build-system]\nrequires = [{0}]\nbuild-backend = \"{1}\"\n ", + "n_words": 7, + "vocab_size": 6, + "n_whitespaces": 20, + "language": "en" + } + }, + { + "id": 61411, + "commit_id": 
"f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py", + "file_name": "versioncontrol.py", + "fun_name": "make_rev_options", + "commit_message": "upd; format", + "code": "def make_rev_options(cls, rev=None, extra_args=None):\n # type: (Optional[str], Optional[CommandArgs]) -> RevOptions\n \n return RevOptions(cls, rev, extra_args=extra_args)\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 35, + "n_words": 14, + "vocab_size": 14, + "complexity": 1, + "nloc": 2, + "token_counts": 25, + "n_ast_nodes": 39, + "n_identifiers": 5, + "d_id": 12556, + "documentation": { + "docstring": "\n Return a RevOptions object.\n\n Args:\n rev: the name of a revision to install.\n extra_args: a list of extra options.\n ", + "n_words": 19, + "vocab_size": 16, + "n_whitespaces": 59, + "language": "en" + } + }, + { + "id": 112723, + "commit_id": "cbac2c5c0f7606aca8ccf08fbd418ffe3adfe427", + "repo": "nni", + "path": "nni/algorithms/compression/v2/pytorch/base/compressor.py", + "file_name": "compressor.py", + "fun_name": "generate_module_groups", + "commit_message": "[Compression] fix typehints (#4800)", + "code": "def generate_module_groups(self) -> Dict[int, List[str]]:\n \n assert self.bound_model is not None, 'No model bounded in this compressor, please use Compressor.reset(model, config_list) to set it.'\n assert self.config_list is not None, 'No config_list set in this compressor, please use Compressor.reset(model, config_list) to set it.'\n\n self._unwrap_model()\n module_groups = {}\n for name, module in self.bound_model.named_modules():\n if module == self.bound_model:\n continue\n layer = LayerInfo(name, module)\n ret = None\n for idx, config in enumerate(self.config_list):\n config = config.copy()\n # expand config if key `default` is in config['op_types']\n if 'op_types' in config and 'default' in config['op_types']:\n expanded_op_types = []\n for op_type in config['op_types']:\n if op_type == 'default':\n expanded_op_types.extend(weighted_modules)\n else:\n expanded_op_types.append(op_type)\n config['op_types'] = expanded_op_types\n # check if condition is satisified\n if 'op_types' in config and layer.type not in config['op_types']:\n continue\n if 'op_names' in config and layer.name not in config['op_names']:\n continue\n ret = (idx, config)\n if ret is not None and 'exclude' not in ret[1]:\n module_groups.setdefault(ret[0], [])\n module_groups[ret[0]].append(name)\n\n self._wrap_model()\n return module_groups\n", + "url": "https://github.com/microsoft/nni.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 18, + "n_whitespaces": 606, + "n_words": 150, + "vocab_size": 78, + "complexity": 14, + "nloc": 38, + "token_counts": 227, + "n_ast_nodes": 377, + "n_identifiers": 28, + "d_id": 24731, + "documentation": { + "docstring": "\n Get all module names in each config in config_list.\n\n Returns\n -------\n Dict[int, List[str]]\n A dict. The key is the config idx in config_list, the value is the module name list. 
i.e., {1: ['layer.0', 'layer.2']}.\n ", + "n_words": 34, + "vocab_size": 27, + "n_whitespaces": 81, + "language": "en" + } + }, + { + "id": 282800, + "commit_id": "635851cbf633a6ea9423ba31fe8f68ab34423a8c", + "repo": "OpenBBTerminal", + "path": "bots/telegram/run_telegram.py", + "file_name": "run_telegram.py", + "fun_name": "send_welcome", + "commit_message": "Telegram Bot (#1458)\n\n* added initial code for telegram\r\n\r\n* improving suggestions and messages\r\n\r\n* improving messages\r\n\r\n* typo\r\n\r\nCo-authored-by: teh_coderer ", + "code": "def send_welcome(message):\n text = \n markdown = telebot.types.InlineKeyboardMarkup()\n markdown.add(\n telebot.types.InlineKeyboardButton(\n \"Star us on GitHub\",\n url=\"https://github.com/GamestonkTerminal/GamestonkTerminal\",\n )\n )\n markdown.add(\n telebot.types.InlineKeyboardButton(\n \"Join us on Discord\", url=\"https://discord.gg/XHsYvvjjWg\"\n )\n )\n bot.send_message(\n chat_id=message.chat.id, text=text, reply_markup=markdown, parse_mode=\"MARKDOWN\"\n )\n # bot.reply_to(markdown, text, parse_mode=\"MARKDOWN\")\n # bot.reply_to(message, text, parse_mode=\"MARKDOWN\")\n\n\n@bot.message_handler(commands=[\"cmds\", \"commands\"])", + "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", + "language": "Python", + "ast_errors": "@bot.message_handler(commands=[\"cmds\", \"commands\"])", + "n_ast_errors": 1, + "ast_levels": 11, + "n_whitespaces": 138, + "n_words": 41, + "vocab_size": 29, + "complexity": 1, + "nloc": 21, + "token_counts": 76, + "n_ast_nodes": 154, + "n_identifiers": 19, + "d_id": 84305, + "documentation": { + "docstring": "\nWelcome to *Gamestonk Terminal Bot* 🦋\nInvestment Research for Everyone\nCheck the available commands with /cmds\n ", + "n_words": 16, + "vocab_size": 16, + "n_whitespaces": 17, + "language": "en" + } + }, + { + "id": 217382, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/fnmatch.py", + "file_name": "fnmatch.py", + "fun_name": "filter", + "commit_message": "add python 3.10.4 for windows", + "code": "def filter(names, pat):\n \n result = []\n pat = os.path.normcase(pat)\n match = _compile_pattern(pat)\n if os.path is posixpath:\n # normcase on posix is NOP. 
Optimize it away from the loop.\n for name in names:\n if match(name):\n result.append(name)\n else:\n for name in names:\n if match(os.path.normcase(name)):\n result.append(name)\n return result\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 139, + "n_words": 45, + "vocab_size": 34, + "complexity": 6, + "nloc": 13, + "token_counts": 80, + "n_ast_nodes": 132, + "n_identifiers": 12, + "d_id": 54738, + "documentation": { + "docstring": "Construct a list from those elements of the iterable NAMES that match PAT.", + "n_words": 13, + "vocab_size": 13, + "n_whitespaces": 12, + "language": "en" + } + }, + { + "id": 3401, + "commit_id": "0a3713a5a52995dc0dc205d8edfd097bf625899f", + "repo": "airbyte", + "path": "airbyte-integrations/connectors/source-salesforce/unit_tests/unit_test.py", + "file_name": "unit_test.py", + "fun_name": "test_stream_unsupported_by_bulk", + "commit_message": "Source Salesforce: Deprecate API Type parameter (#9302)\n\n* use BULK for the first sync, REST for incremental sync\r\n\r\n* if stream contains compound data or/and base64 use always REST\r\n\r\n* fix get stream state from connector state\r\n\r\n* fix integration test\r\n\r\n* refactor catalog name\r\n\r\n* format code\r\n\r\n* refactor unit tests\r\n\r\n* refactor unit tests 2\r\n\r\n* format code 2\r\n\r\n* Set additionalProperties to true not to break test temporarily\r\n\r\n* fix unit test and remove unnecessary filtering fields\r\n\r\n* bump version\r\n\r\n* updated spec and def yaml\r\n\r\nCo-authored-by: auganbay ", + "code": "def test_stream_unsupported_by_bulk(stream_config, stream_api, caplog):\n \n stream_name = \"AcceptedEventRelation\"\n stream = _generate_stream(stream_name, stream_config, stream_api)\n assert not isinstance(stream, BulkSalesforceStream)\n\n", + "url": "https://github.com/airbytehq/airbyte.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 28, + "n_words": 16, + "vocab_size": 15, + "complexity": 1, + "nloc": 4, + "token_counts": 31, + "n_ast_nodes": 50, + "n_identifiers": 9, + "d_id": 469, + "documentation": { + "docstring": "\n Stream `AcceptedEventRelation` is not supported by BULK API, so that REST API stream will be used for it.\n ", + "n_words": 18, + "vocab_size": 18, + "n_whitespaces": 25, + "language": "en" + } + }, + { + "id": 220367, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/asyncio/base_events.py", + "file_name": "base_events.py", + "fun_name": "call_exception_handler", + "commit_message": "add python 3.10.4 for windows", + "code": "def call_exception_handler(self, context):\n \n if self._exception_handler is None:\n try:\n self.default_exception_handler(context)\n except (SystemExit, KeyboardInterrupt):\n raise\n except BaseException:\n # Second protection layer for unexpected errors\n # in the default implementation, as well as for subclassed\n # event loops with overloaded \"default_exception_handler\".\n logger.error('Exception in default exception handler',\n exc_info=True)\n else:\n try:\n self._exception_handler(self, context)\n except (SystemExit, KeyboardInterrupt):\n raise\n except BaseException as exc:\n # Exception in the user set custom exception handler.\n try:\n # Let's try default handler.\n self.default_exception_handler({\n 'message': 'Unhandled error in exception handler',\n 'exception': exc,\n 'context': context,\n })\n except 
(SystemExit, KeyboardInterrupt):\n raise\n except BaseException:\n # Guard 'default_exception_handler' in case it is\n # overloaded.\n logger.error('Exception in default exception handler '\n 'while handling an unexpected error '\n 'in custom exception handler',\n exc_info=True)\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 18, + "n_whitespaces": 708, + "n_words": 115, + "vocab_size": 69, + "complexity": 8, + "nloc": 28, + "token_counts": 113, + "n_ast_nodes": 202, + "n_identifiers": 12, + "d_id": 55979, + "documentation": { + "docstring": "Call the current event loop's exception handler.\n\n The context argument is a dict containing the following keys:\n\n - 'message': Error message;\n - 'exception' (optional): Exception object;\n - 'future' (optional): Future instance;\n - 'task' (optional): Task instance;\n - 'handle' (optional): Handle instance;\n - 'protocol' (optional): Protocol instance;\n - 'transport' (optional): Transport instance;\n - 'socket' (optional): Socket instance;\n - 'asyncgen' (optional): Asynchronous generator that caused\n the exception.\n\n New keys maybe introduced in the future.\n\n Note: do not overload this method in an event loop subclass.\n For custom exception handling, use the\n `set_exception_handler()` method.\n ", + "n_words": 91, + "vocab_size": 64, + "n_whitespaces": 228, + "language": "en" + } + }, + { + "id": 71, + "commit_id": "10ae1d589044a6ae4722ead7aedc63fcdc4923b5", + "repo": "PySyft", + "path": "packages/syft/tests/syft/core/tensor/tensor_serde_test.py", + "file_name": "tensor_serde_test.py", + "fun_name": "test_sept_child", + "commit_message": "Started DPTensor resource optimization\n\n- Added initial REPT and SEPT benchmarking tests\n- Deleted unused old Tensor classes\n- Added pympler for memory size tests\n\nCo-authored-by: @IshanMi\nCo-authored-by: @rasswanth-s", + "code": "def test_sept_child() -> None:\n \n rows = 10_000\n cols = 7\n # these times and sizes are based on the above constants and Madhavas MacBook Pro 2019\n expected_sept_mem_size = 0.8035125732421875\n expected_sept_ser_size = 1.4993972778320312\n macbook_pro_2019_ser_time = 0.03371272199999975\n macbook_pro_2019_de_time = 0.02922678500000009\n\n sept = make_sept(rows=rows, cols=cols)\n\n start = timeit.default_timer()\n ser = sy.serialize(sept, to_bytes=True)\n end = timeit.default_timer()\n time_ser = end - start\n\n start = timeit.default_timer()\n de = sy.deserialize(ser, from_bytes=True)\n end = timeit.default_timer()\n time_de = end - start\n\n assert sept == de\n\n current_sept_mem_size = size(sept)\n mem_diff = (current_sept_mem_size / expected_sept_mem_size * 100) - 100\n\n current_sept_bytes_size = size(ser)\n bytes_diff = (current_sept_bytes_size / expected_sept_ser_size * 100) - 100\n\n ser_time_diff = (time_ser / macbook_pro_2019_ser_time * 100) - 100\n de_time_diff = (time_de / macbook_pro_2019_de_time * 100) - 100\n\n print(\"SEPT Stats\")\n print(\"==========\")\n print(\"In-memory size of SEPT\", size(sept))\n print(\"Serialized size of SEPT\", size(ser))\n print(f\"Serializing {rows}x{cols} took {time_ser} secs\")\n print(f\"Deserializing {rows}x{cols} took {time_de} secs\")\n\n print(\"Current Results\")\n print(\"===============\")\n print(f\"In-memory size delta: {mem_diff}%\")\n print(f\"Serialized size delta: {bytes_diff}%\")\n print(f\"Serializing time delta: {ser_time_diff}%\")\n print(f\"Deserializing time delta: {de_time_diff}%\")\n\n # we want to assert that 
our calculated values are smaller than the old values with\n # some tolerance\n assert (current_sept_mem_size - expected_sept_mem_size) < 1e-3\n assert (current_sept_bytes_size - expected_sept_ser_size) < 2e-3\n # TODO: make time benchmarks stable (probably can't run in parallel)\n # assert (time_ser - macbook_pro_2019_ser_time) < 2e-1\n # assert (time_de - macbook_pro_2019_de_time) < 2e-1\n\n", + "url": "https://github.com/OpenMined/PySyft.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 345, + "n_words": 216, + "vocab_size": 123, + "complexity": 1, + "nloc": 38, + "token_counts": 251, + "n_ast_nodes": 449, + "n_identifiers": 30, + "d_id": 45, + "documentation": { + "docstring": "We need to benchmark both the size and time to serialize and deserialize SEPTs", + "n_words": 14, + "vocab_size": 12, + "n_whitespaces": 13, + "language": "en" + } + }, + { + "id": 47362, + "commit_id": "8b687ec82a7047fc35410f5c5bb0726de434e749", + "repo": "airflow", + "path": "tests/models/test_taskinstance.py", + "file_name": "test_taskinstance.py", + "fun_name": "test_xcom_pull_after_deferral", + "commit_message": "Do not clear XCom when resuming from deferral (#22932)", + "code": "def test_xcom_pull_after_deferral(self, create_task_instance, session):\n \n\n key = 'xcom_key'\n value = 'xcom_value'\n\n ti = create_task_instance(\n dag_id='test_xcom',\n schedule_interval='@monthly',\n task_id='test_xcom',\n pool='test_xcom',\n )\n\n ti.run(mark_success=True)\n ti.xcom_push(key=key, value=value)\n\n ti.next_method = \"execute\"\n session.merge(ti)\n session.commit()\n\n ti.run(ignore_all_deps=True)\n assert ti.xcom_pull(task_ids='test_xcom', key=key) == value\n", + "url": "https://github.com/apache/airflow.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 160, + "n_words": 32, + "vocab_size": 28, + "complexity": 1, + "nloc": 16, + "token_counts": 96, + "n_ast_nodes": 165, + "n_identifiers": 20, + "d_id": 9082, + "documentation": { + "docstring": "\n tests xcom will not clear before a task runs its next method after deferral.\n ", + "n_words": 14, + "vocab_size": 14, + "n_whitespaces": 29, + "language": "en" + } + }, + { + "id": 244730, + "commit_id": "9d7511d8c35df1f9c13b17eb770136859bf370be", + "repo": "mmdetection", + "path": "tests/test_models/test_dense_heads/test_pisa_ssd_head.py", + "file_name": "test_pisa_ssd_head.py", + "fun_name": "test_pisa_ssd_head_loss", + "commit_message": "Update SSD and PISA-SSD model config", + "code": "def test_pisa_ssd_head_loss(self):\n \n s = 300\n img_metas = [{\n 'img_shape': (s, s, 3),\n 'scale_factor': 1,\n }]\n cfg = Config(\n dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.,\n ignore_iof_thr=-1,\n gt_max_assign_all=False),\n sampler=dict(type='PseudoSampler'),\n smoothl1_beta=1.,\n allowed_border=-1,\n pos_weight=-1,\n neg_pos_ratio=3,\n debug=False))\n pisa_ssd_head = PISASSDHead(\n num_classes=4,\n in_channels=(1, 1, 1, 1, 1, 1),\n anchor_generator=dict(\n type='SSDAnchorGenerator',\n scale_major=False,\n input_size=s,\n basesize_ratio_range=(0.15, 0.9),\n strides=[8, 16, 32, 64, 100, 300],\n ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),\n train_cfg=cfg)\n\n # PISA SSD head expects a multiple levels of features per image\n feats = (\n torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))\n for stride in pisa_ssd_head.prior_generator.strides)\n cls_scores, bbox_preds = 
pisa_ssd_head.forward(feats)\n\n # test without isr and carl\n # Test that empty ground truth encourages the network to\n # predict background\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.empty((0, 4))\n gt_instances.labels = torch.LongTensor([])\n\n empty_gt_losses = pisa_ssd_head.loss(cls_scores, bbox_preds,\n [gt_instances], img_metas)\n # When there is no truth, cls_loss and box_loss should all be zero.\n empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n self.assertEqual(\n empty_cls_loss.item(), 0,\n 'there should be no cls loss when there are no true boxes')\n self.assertEqual(\n empty_box_loss.item(), 0,\n 'there should be no box loss when there are no true boxes')\n\n # When truth is non-empty then both cls and box loss\n # should be nonzero for random inputs\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.Tensor(\n [[23.6667, 23.8757, 238.6326, 151.8874]])\n gt_instances.labels = torch.LongTensor([2])\n\n one_gt_losses = pisa_ssd_head.loss(cls_scores, bbox_preds,\n [gt_instances], img_metas)\n onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n self.assertGreater(onegt_cls_loss.item(), 0,\n 'cls loss should be non-zero')\n self.assertGreater(onegt_box_loss.item(), 0,\n 'box loss should be non-zero')\n\n pisa_ssd_head.train_cfg.update(\n dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))\n\n # test with isr and carl\n # Test that empty ground truth encourages the network to\n # predict background\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.empty((0, 4))\n gt_instances.labels = torch.LongTensor([])\n\n empty_gt_losses = pisa_ssd_head.loss(cls_scores, bbox_preds,\n [gt_instances], img_metas)\n # When there is no truth, cls_loss and box_loss should all be zero.\n empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n self.assertEqual(\n empty_cls_loss.item(), 0,\n 'there should be no cls loss when there are no true boxes')\n self.assertEqual(\n empty_box_loss.item(), 0,\n 'there should be no box loss when there are no true boxes')\n\n # When truth is non-empty then both cls and box loss\n # should be nonzero for random inputs\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.Tensor(\n [[23.6667, 23.8757, 238.6326, 151.8874]])\n gt_instances.labels = torch.LongTensor([2])\n\n one_gt_losses = pisa_ssd_head.loss(cls_scores, bbox_preds,\n [gt_instances], img_metas)\n onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n self.assertGreater(onegt_cls_loss.item(), 0,\n 'cls loss should be non-zero')\n self.assertGreater(onegt_box_loss.item(), 0,\n 'box loss should be non-zero')\n", + "url": "https://github.com/open-mmlab/mmdetection.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 1565, + "n_words": 382, + "vocab_size": 162, + "complexity": 2, + "nloc": 88, + "token_counts": 698, + "n_ast_nodes": 1025, + "n_identifiers": 63, + "d_id": 70500, + "documentation": { + "docstring": "Tests pisa ssd head loss when truth is empty and non-empty.", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 61060, + "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", + "repo": "transferlearning", + "path": ".venv/lib/python3.8/site-packages/pip/_internal/resolution/legacy/resolver.py", + "file_name": "resolver.py", + "fun_name": 
"get_installation_order", + "commit_message": "upd; format", + "code": "def get_installation_order(self, req_set):\n # type: (RequirementSet) -> List[InstallRequirement]\n \n # The current implementation, which we may change at any point\n # installs the user specified things in the order given, except when\n # dependencies must come earlier to achieve topological order.\n order = []\n ordered_reqs = set() # type: Set[InstallRequirement]\n", + "url": "https://github.com/jindongwang/transferlearning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 99, + "n_words": 49, + "vocab_size": 41, + "complexity": 2, + "nloc": 7, + "token_counts": 36, + "n_ast_nodes": 36, + "n_identifiers": 6, + "d_id": 12403, + "documentation": { + "docstring": "Create the installation order.\n\n The installation order is topological - requirements are installed\n before the requiring thing. We break cycles at an arbitrary point,\n and make no other guarantees.\n ", + "n_words": 29, + "vocab_size": 27, + "n_whitespaces": 57, + "language": "en" + } + }, + { + "id": 76287, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/utils/setup.py", + "file_name": "setup.py", + "fun_name": "bump_client_version", + "commit_message": "Reformat with black", + "code": "def bump_client_version(self):\n \n path = os.path.join(\".\", \"client\", \"package.json\")\n input_file = io.open(path, \"r\")\n\n try:\n package = json.loads(input_file.read().decode(\"utf-8\"))\n except (ValueError) as e:\n print(\"Unable to read \" + path + \" \" + e) # noqa\n raise SystemExit(1)\n\n package[\"version\"] = __semver__\n\n try:\n with io.open(path, \"w\", encoding=\"utf-8\") as f:\n f.write(str(json.dumps(package, indent=2, ensure_ascii=False)))\n except (IOError) as e:\n print(\"Error setting the version for front-end assets: \" + str(e)) # noqa\n raise SystemExit(1)\n\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 204, + "n_words": 65, + "vocab_size": 45, + "complexity": 3, + "nloc": 15, + "token_counts": 138, + "n_ast_nodes": 247, + "n_identifiers": 26, + "d_id": 16486, + "documentation": { + "docstring": "\n Writes the current Wagtail version number into package.json\n ", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 23, + "language": "en" + } + }, + { + "id": 316830, + "commit_id": "5b32eea3d04d223b01efddb5c13a88e540df8741", + "repo": "core", + "path": "homeassistant/components/wallbox/number.py", + "file_name": "number.py", + "fun_name": "native_min_value", + "commit_message": "Add support for bidirectional chargers to Wallbox integration (#74313)\n\n* Add support for the Quasar bidirectional charger to the Wallbox integration, including ability to control charger while discharging, set a negative charge rate and monitor discharged amount\r\n\r\n* Make code more generic in order to support other bidirectional models in the future\r\n\r\n* Updates to files to comply with HA formatting rules\r\n\r\n* Change const file to fix black check failure\r\n\r\n* Remove unnecessay loop in number entity", + "code": "def native_min_value(self) -> float:\n \n return (self.max_value * -1) if self._is_bidirectional else 6\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 26, + "n_words": 12, + "vocab_size": 12, + 
"complexity": 2, + "nloc": 3, + "token_counts": 23, + "n_ast_nodes": 38, + "n_identifiers": 5, + "d_id": 115406, + "documentation": { + "docstring": "Return the minimum available current based on charger type - some chargers can discharge.", + "n_words": 14, + "vocab_size": 14, + "n_whitespaces": 13, + "language": "en" + } + }, + { + "id": 82360, + "commit_id": "c412e97acba65a2a68e70ca15ea950bd31f90d3e", + "repo": "django-cms", + "path": "cms/tests/test_cache.py", + "file_name": "test_cache.py", + "fun_name": "test_cache_limit_ttl_greater_than_default_cache_ttl", + "commit_message": "feat: add cache ttl extension point (#7299)\n\nAdds the setting `CMS_CACHE_LIMIT_TTL_CLASS` that should have a\r\n`limit_page_cache_ttl` method that would be called to limit the cache\r\nttl of a page using business logic.\r\nCloses #7296", + "code": "def test_cache_limit_ttl_greater_than_default_cache_ttl(self):\n \n page1 = create_page('test page 1', 'nav_playground.html', 'en',\n published=True)\n page1_url = page1.get_absolute_url()\n\n limit_page_cache_ttl_function = \".\".join([PlaceholderCacheTestCase.__module__, limit_page_cache_ttl_test_500.__name__])\n with self.settings(CMS_LIMIT_TTL_CACHE_FUNCTION=limit_page_cache_ttl_function):\n page1.publish('en')\n request = self.get_request(page1_url)\n request.current_page = Page.objects.get(pk=page1.pk)\n response = self.client.get(page1_url)\n self.assertTrue('max-age=40' in response['Cache-Control'], response['Cache-Control']) # noqa\n\n", + "url": "https://github.com/django-cms/django-cms.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 153, + "n_words": 35, + "vocab_size": 30, + "complexity": 1, + "nloc": 11, + "token_counts": 109, + "n_ast_nodes": 190, + "n_identifiers": 26, + "d_id": 17357, + "documentation": { + "docstring": "\n Test the `CMS_LIMIT_TTL_CACHE_FUNCTION` setting with a class that returns a value much\n greater thant the default value of 40 seconds.\n ", + "n_words": 20, + "vocab_size": 17, + "n_whitespaces": 42, + "language": "en" + } + }, + { + "id": 82288, + "commit_id": "a3110e1ff24085373898c7d2a85f628abeb8518d", + "repo": "django-cms", + "path": "cms/models/managers.py", + "file_name": "managers.py", + "fun_name": "subordinate_to_user", + "commit_message": "Enabled isort workflow (#7200)\n\n* Ran isort\r\n\r\n* Enabled isort workflow\r\n\r\nCo-authored-by: Vinit Kumar ", + "code": "def subordinate_to_user(self, user, site):\n \n # get user level\n from cms.utils.page_permissions import get_change_permissions_id_list\n from cms.utils.permissions import get_user_permission_level\n\n try:\n user_level = get_user_permission_level(user, site)\n except NoPermissionsException:\n return self.none()\n\n if user_level == ROOT_USER_LEVEL:\n return self.all()\n\n # get all permissions\n page_id_allow_list = get_change_permissions_id_list(user, site, check_global=False)\n\n # get permission set, but without objects targeting user, or any group\n # in which he can be\n qs = self.filter(\n page__id__in=page_id_allow_list,\n page__node__depth__gte=user_level,\n )\n qs = qs.exclude(user=user).exclude(group__user=user)\n return qs\n", + "url": "https://github.com/django-cms/django-cms.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 229, + "n_words": 69, + "vocab_size": 53, + "complexity": 3, + "nloc": 16, + "token_counts": 102, + "n_ast_nodes": 168, + "n_identifiers": 23, + "d_id": 17339, + "documentation": { + "docstring": "Get 
all page permission objects on which user/group is lover in\n hierarchy then given user and given user can change permissions on them.\n\n !IMPORTANT, but exclude objects with given user, or any group containing\n this user - he can't be able to change his own permissions, because if\n he does, and removes some permissions from himself, he will not be able\n to add them anymore.\n\n Example:\n A\n / \\\n user B,E\n / \\\n C,X D,Y\n\n Gives permission nodes C,X,D,Y under user, so he can edit\n permissions if he haves can_change_permission.\n\n Example:\n A,Y\n / \\\n user B,E,X\n / \\\n C,X D,Y\n\n Gives permission nodes C,D under user, so he can edit, but not\n anymore to X,Y, because this users are on the same level or higher\n in page hierarchy. (but only if user have can_change_permission)\n\n Example:\n A\n / \\\n user B,E\n / | \\\n C,X D,Y user\n / \\\n I J,A\n\n User permissions can be assigned to multiple page nodes, so merge of\n all of them is required. In this case user can see permissions for\n users C,X,D,Y,I,J but not A, because A user in higher in hierarchy.\n\n If permission object holds group, this permission object can be visible\n to user only if all of the group members are lover in hierarchy. If any\n of members is higher then given user, this entry must stay invisible.\n\n If user is superuser, or haves global can_change_permission permissions,\n show him everything.\n\n Result of this is used in admin for page permissions inline.\n ", + "n_words": 248, + "vocab_size": 119, + "n_whitespaces": 1085, + "language": "en" + } + }, + { + "id": 263956, + "commit_id": "dc12cb59559f99110917bcbd21c9960ab57d994f", + "repo": "pyinstaller", + "path": "tests/unit/test_bytecode.py", + "file_name": "test_bytecode.py", + "fun_name": "test_finditer", + "commit_message": "tests: fix test_finditer\n\nHave the test use bytestrings instead of strings.\n\nAlso assert that the bytecode string passed to bytecode.finditer()\nis in fact a bytestring.", + "code": "def test_finditer():\n \n matches = list(finditer(re.compile(rb\"\\d+\"), b\"0123 4567 890 12 3 4\"))\n aligned = [i.group() for i in matches]\n assert aligned == [b\"0123\", b\"567\", b\"890\", b\"12\"]\n", + "url": "https://github.com/pyinstaller/pyinstaller.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 37, + "n_words": 25, + "vocab_size": 23, + "complexity": 2, + "nloc": 4, + "token_counts": 52, + "n_ast_nodes": 84, + "n_identifiers": 9, + "d_id": 77525, + "documentation": { + "docstring": "\n Test that bytecode.finditer() yields matches only that start on an even byte (``match.start() % 2 == 0``).\n\n There are 3 permutations here when considering a match:\n - A match starts on an even byte:\n That's good! Include that sequence.\n - A single character match starts on an odd byte:\n Ignore it. 
It's a false positive.\n - A multi-character match starts on an odd byte:\n This match will be a false positive but there may be a genuine match shortly afterwards (in the case of the\n # test below - it'll be the next character) which overlaps with this one so we must override regex's\n behaviour of ignoring overlapping matches to prevent these from getting lost.\n ", + "n_words": 115, + "vocab_size": 82, + "n_whitespaces": 169, + "language": "en" + } + }, + { + "id": 167802, + "commit_id": "f65417656ba8c59438d832b6e2a431f78d40c21c", + "repo": "pandas", + "path": "pandas/core/reshape/util.py", + "file_name": "util.py", + "fun_name": "cartesian_product", + "commit_message": "TYP: more return annotations in core/ (#47618)\n\n* TYP: more return annotations in core/\r\n\r\n* from __future__ import annotations\r\n\r\n* more __future__", + "code": "def cartesian_product(X) -> list[np.ndarray]:\n \n msg = \"Input must be a list-like of list-likes\"\n if not is_list_like(X):\n raise TypeError(msg)\n for x in X:\n if not is_list_like(x):\n raise TypeError(msg)\n\n if len(X) == 0:\n return []\n\n lenX = np.fromiter((len(x) for x in X), dtype=np.intp)\n cumprodX = np.cumproduct(lenX)\n\n if np.any(cumprodX < 0):\n raise ValueError(\"Product space too large to allocate arrays!\")\n\n a = np.roll(cumprodX, 1)\n a[0] = 1\n\n if cumprodX[-1] != 0:\n b = cumprodX[-1] / cumprodX\n else:\n # if any factor is empty, the cartesian product is empty\n b = np.zeros_like(cumprodX)\n\n return [tile_compat(np.repeat(x, b[i]), np.product(a[i])) for i, x in enumerate(X)]\n\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 195, + "n_words": 96, + "vocab_size": 68, + "complexity": 9, + "nloc": 42, + "token_counts": 182, + "n_ast_nodes": 290, + "n_identifiers": 27, + "d_id": 40127, + "documentation": { + "docstring": "\n Numpy version of itertools.product.\n Sometimes faster (for large inputs)...\n\n Parameters\n ----------\n X : list-like of list-likes\n\n Returns\n -------\n product : list of ndarrays\n\n Examples\n --------\n >>> cartesian_product([list('ABC'), [1, 2]])\n [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype=' bool:\n \n return self._engine.is_monotonic_decreasing\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 7, + "n_whitespaces": 20, + "n_words": 6, + "vocab_size": 6, + "complexity": 1, + "nloc": 14, + "token_counts": 14, + "n_ast_nodes": 25, + "n_identifiers": 4, + "d_id": 40199, + "documentation": { + "docstring": "\n Return a boolean if the values are equal or decreasing.\n\n Examples\n --------\n >>> Index([3, 2, 1]).is_monotonic_decreasing\n True\n >>> Index([3, 2, 2]).is_monotonic_decreasing\n True\n >>> Index([3, 1, 2]).is_monotonic_decreasing\n False\n ", + "n_words": 27, + "vocab_size": 20, + "n_whitespaces": 98, + "language": "en" + } + }, + { + "id": 206808, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/views/debug.py", + "file_name": "debug.py", + "fun_name": "get_traceback_frame_variables", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def get_traceback_frame_variables(self, request, tb_frame):\n \n # Loop through the frame's callers to see if the sensitive_variables\n # decorator was used.\n current_frame = tb_frame.f_back\n sensitive_variables = None\n while current_frame is not None:\n if (\n 
current_frame.f_code.co_name == \"sensitive_variables_wrapper\"\n and \"sensitive_variables_wrapper\" in current_frame.f_locals\n ):\n # The sensitive_variables decorator was used, so we take note\n # of the sensitive variables' names.\n wrapper = current_frame.f_locals[\"sensitive_variables_wrapper\"]\n sensitive_variables = getattr(wrapper, \"sensitive_variables\", None)\n break\n current_frame = current_frame.f_back\n\n cleansed = {}\n if self.is_active(request) and sensitive_variables:\n if sensitive_variables == \"__ALL__\":\n # Cleanse all variables\n for name in tb_frame.f_locals:\n cleansed[name] = self.cleansed_substitute\n else:\n # Cleanse specified variables\n for name, value in tb_frame.f_locals.items():\n if name in sensitive_variables:\n value = self.cleansed_substitute\n else:\n value = self.cleanse_special_types(request, value)\n cleansed[name] = value\n else:\n # Potentially cleanse the request and any MultiValueDicts if they\n # are one of the frame variables.\n for name, value in tb_frame.f_locals.items():\n cleansed[name] = self.cleanse_special_types(request, value)\n\n if (\n tb_frame.f_code.co_name == \"sensitive_variables_wrapper\"\n and \"sensitive_variables_wrapper\" in tb_frame.f_locals\n ):\n # For good measure, obfuscate the decorated function's arguments in\n # the sensitive_variables decorator's frame, in case the variables\n # associated with those arguments were meant to be obfuscated from\n # the decorated function's frame.\n cleansed[\"func_args\"] = self.cleansed_substitute\n cleansed[\"func_kwargs\"] = self.cleansed_substitute\n\n return cleansed.items()\n\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 18, + "n_whitespaces": 757, + "n_words": 195, + "vocab_size": 105, + "complexity": 13, + "nloc": 34, + "token_counts": 209, + "n_ast_nodes": 355, + "n_identifiers": 19, + "d_id": 51719, + "documentation": { + "docstring": "\n Replace the values of variables marked as sensitive with\n stars (*********).\n ", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 33, + "language": "en" + } + }, + { + "id": 123112, + "commit_id": "803b90729d25fda253011c505d0189e8e63cc039", + "repo": "EasyOCR", + "path": "easyocr/DBNet/DBNet.py", + "file_name": "DBNet.py", + "fun_name": "image2hmap", + "commit_message": "add dbnet", + "code": "def image2hmap(self, image_tensor):\n \n return self.model.forward(image_tensor, training=False)\n ", + "url": "https://github.com/JaidedAI/EasyOCR.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 28, + "n_words": 6, + "vocab_size": 6, + "complexity": 1, + "nloc": 2, + "token_counts": 21, + "n_ast_nodes": 34, + "n_identifiers": 6, + "d_id": 27289, + "documentation": { + "docstring": "\n Run the model to obtain a heatmap tensor from a image tensor. 
The heatmap\n tensor indicates the probability of each pixel being a part of text area.\n\n Parameters\n ----------\n image_tensor : torch.tensor\n Image tensor.\n\n Returns\n -------\n torch.tensor\n Probability heatmap tensor.\n ", + "n_words": 40, + "vocab_size": 30, + "n_whitespaces": 126, + "language": "en" + } + }, + { + "id": 223533, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/email/_header_value_parser.py", + "file_name": "_header_value_parser.py", + "fun_name": "get_extended_attribute", + "commit_message": "add python 3.10.4 for windows", + "code": "def get_extended_attribute(value):\n \n # XXX: should we have an ExtendedAttribute TokenList?\n attribute = Attribute()\n if value and value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n attribute.append(token)\n if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:\n raise errors.HeaderParseError(\n \"expected token but found '{}'\".format(value))\n token, value = get_extended_attrtext(value)\n attribute.append(token)\n if value and value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n attribute.append(token)\n return attribute, value\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 129, + "n_words": 56, + "vocab_size": 33, + "complexity": 7, + "nloc": 14, + "token_counts": 99, + "n_ast_nodes": 164, + "n_identifiers": 13, + "d_id": 56956, + "documentation": { + "docstring": " [CFWS] 1*extended_attrtext [CFWS]\n\n This is like the non-extended version except we allow % characters, so that\n we can pick up an encoded value as a single string.\n\n ", + "n_words": 27, + "vocab_size": 25, + "n_whitespaces": 37, + "language": "en" + } + }, + { + "id": 73597, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/contrib/table_block/tests.py", + "file_name": "tests.py", + "fun_name": "test_table_options_language", + "commit_message": "Reformat with black", + "code": "def test_table_options_language(self):\n \n # default must always contain a language value\n block = TableBlock()\n self.assertIn(\"language\", block.table_options)\n # French\n translation.activate(\"fr-fr\")\n block_fr = TableBlock()\n self.assertEqual(\"fr-fr\", block_fr.table_options[\"language\"])\n translation.activate(\"it\")\n # Italian\n block_it = TableBlock()\n self.assertEqual(\"it\", block_it.table_options[\"language\"])\n # table_options with language provided, different to environment\n block_with_lang = TableBlock(table_options={\"language\": \"ja\"})\n self.assertNotEqual(\"it\", block_with_lang.table_options[\"language\"])\n self.assertEqual(\"ja\", block_with_lang.table_options[\"language\"])\n translation.activate(\"en\")\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 167, + "n_words": 48, + "vocab_size": 38, + "complexity": 1, + "nloc": 13, + "token_counts": 113, + "n_ast_nodes": 212, + "n_identifiers": 13, + "d_id": 16063, + "documentation": { + "docstring": "\n Test that the environment's language is used if no language provided.\n ", + "n_words": 11, + "vocab_size": 10, + "n_whitespaces": 26, + "language": "en" + } + }, + { + "id": 271831, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/engine/training_utils.py", + "file_name": "training_utils.py", + "fun_name": 
"get_static_batch_size", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def get_static_batch_size(layer):\n \n batch_input_shape, _ = get_input_shape_and_dtype(layer)\n if batch_input_shape is not None:\n return tf.compat.v1.Dimension(batch_input_shape[0]).value\n return None\n\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 34, + "n_words": 15, + "vocab_size": 14, + "complexity": 2, + "nloc": 5, + "token_counts": 38, + "n_ast_nodes": 62, + "n_identifiers": 10, + "d_id": 80855, + "documentation": { + "docstring": "Gets the static batch size of a Layer.\n\n Args:\n layer: a `Layer` instance.\n\n Returns:\n The static batch size of a Layer.\n ", + "n_words": 21, + "vocab_size": 14, + "n_whitespaces": 40, + "language": "en" + } + }, + { + "id": 176475, + "commit_id": "f6755ffa00211b523c6c0bec5398bc6c3c43c8b1", + "repo": "networkx", + "path": "networkx/algorithms/community/quality.py", + "file_name": "quality.py", + "fun_name": "modularity", + "commit_message": "Update black (#5438)\n\n* CI: sync up black dev requirements version with precommit\r\n\r\n* Run black\r\n\r\nCo-authored-by: Jarrod Millman ", + "code": "def modularity(G, communities, weight=\"weight\", resolution=1):\n r\n if not isinstance(communities, list):\n communities = list(communities)\n if not is_partition(G, communities):\n raise NotAPartition(G, communities)\n\n directed = G.is_directed()\n if directed:\n out_degree = dict(G.out_degree(weight=weight))\n in_degree = dict(G.in_degree(weight=weight))\n m = sum(out_degree.values())\n norm = 1 / m**2\n else:\n out_degree = in_degree = dict(G.degree(weight=weight))\n deg_sum = sum(out_degree.values())\n m = deg_sum / 2\n norm = 1 / deg_sum**2\n", + "url": "https://github.com/networkx/networkx.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 145, + "n_words": 58, + "vocab_size": 36, + "complexity": 4, + "nloc": 99, + "token_counts": 152, + "n_ast_nodes": 226, + "n_identifiers": 20, + "d_id": 41926, + "documentation": { + "docstring": "Returns the modularity of the given partition of the graph.\n\n Modularity is defined in [1]_ as\n\n .. math::\n Q = \\frac{1}{2m} \\sum_{ij} \\left( A_{ij} - \\gamma\\frac{k_ik_j}{2m}\\right)\n \\delta(c_i,c_j)\n\n where $m$ is the number of edges, $A$ is the adjacency matrix of `G`,\n $k_i$ is the degree of $i$, $\\gamma$ is the resolution parameter,\n and $\\delta(c_i, c_j)$ is 1 if $i$ and $j$ are in the same community else 0.\n\n According to [2]_ (and verified by some algebra) this can be reduced to\n\n .. math::\n Q = \\sum_{c=1}^{n}\n \\left[ \\frac{L_c}{m} - \\gamma\\left( \\frac{k_c}{2m} \\right) ^2 \\right]\n\n where the sum iterates over all communities $c$, $m$ is the number of edges,\n $L_c$ is the number of intra-community links for community $c$,\n $k_c$ is the sum of degrees of the nodes in community $c$,\n and $\\gamma$ is the resolution parameter.\n\n The resolution parameter sets an arbitrary tradeoff between intra-group\n edges and inter-group edges. More complex grouping patterns can be\n discovered by analyzing the same network with multiple values of gamma\n and then combining the results [3]_. That said, it is very common to\n simply use gamma=1. 
More on the choice of gamma is in [4]_.\n\n The second formula is the one actually used in calculation of the modularity.\n For directed graphs the second formula replaces $k_c$ with $k^{in}_c k^{out}_c$.\n\n Parameters\n ----------\n G : NetworkX Graph\n\n communities : list or iterable of set of nodes\n These node sets must represent a partition of G's nodes.\n\n weight : string or None, optional (default=\"weight\")\n The edge attribute that holds the numerical value used\n as a weight. If None or an edge does not have that attribute,\n then that edge has weight 1.\n\n resolution : float (default=1)\n If resolution is less than 1, modularity favors larger communities.\n Greater than 1 favors smaller communities.\n\n Returns\n -------\n Q : float\n The modularity of the paritition.\n\n Raises\n ------\n NotAPartition\n If `communities` is not a partition of the nodes of `G`.\n\n Examples\n --------\n >>> import networkx.algorithms.community as nx_comm\n >>> G = nx.barbell_graph(3, 0)\n >>> nx_comm.modularity(G, [{0, 1, 2}, {3, 4, 5}])\n 0.35714285714285715\n >>> nx_comm.modularity(G, nx_comm.label_propagation_communities(G))\n 0.35714285714285715\n\n References\n ----------\n .. [1] M. E. J. Newman \"Networks: An Introduction\", page 224.\n Oxford University Press, 2011.\n .. [2] Clauset, Aaron, Mark EJ Newman, and Cristopher Moore.\n \"Finding community structure in very large networks.\"\n Phys. Rev. E 70.6 (2004). \n .. [3] Reichardt and Bornholdt \"Statistical Mechanics of Community Detection\"\n Phys. Rev. E 74, 016110, 2006. https://doi.org/10.1103/PhysRevE.74.016110\n .. [4] M. E. J. Newman, \"Equivalence between modularity optimization and\n maximum likelihood methods for community detection\"\n Phys. Rev. E 94, 052315, 2016. https://doi.org/10.1103/PhysRevE.94.052315\n\n ", + "n_words": 425, + "vocab_size": 259, + "n_whitespaces": 682, + "language": "en" + } + }, + { + "id": 114143, + "commit_id": "3023d6e0223e50c2d7cbe850f1c5355e5d505ceb", + "repo": "mindsdb", + "path": "mindsdb/api/mongo/classes/responder.py", + "file_name": "responder.py", + "fun_name": "handle", + "commit_message": "renaming", + "code": "def handle(self, query, args, env, session):\n \n if isinstance(self.result, dict):\n return self.result\n else:\n return self.result(query, args, env, session)\n", + "url": "https://github.com/mindsdb/mindsdb.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 60, + "n_words": 17, + "vocab_size": 14, + "complexity": 2, + "nloc": 5, + "token_counts": 43, + "n_ast_nodes": 63, + "n_identifiers": 9, + "d_id": 25106, + "documentation": { + "docstring": " making answer based on params:\n\n query (dict): document(s) from request\n args (dict): all other significant information from request: flags, collection name, rows to return, etc\n env (dict): config, model_interface instance, and other mindsdb related stuff\n session (object): current session\n\n returns documents as dict or list of dicts\n ", + "n_words": 47, + "vocab_size": 42, + "n_whitespaces": 90, + "language": "en" + } + }, + { + "id": 276712, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/utils/control_flow_util.py", + "file_name": "control_flow_util.py", + "fun_name": "GetContainingXLAContext", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def GetContainingXLAContext(ctxt):\n \n while ctxt:\n if ctxt.IsXLAContext():\n return ctxt\n ctxt = 
ctxt.outer_context\n return None\n\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 47, + "n_words": 13, + "vocab_size": 11, + "complexity": 3, + "nloc": 6, + "token_counts": 25, + "n_ast_nodes": 44, + "n_identifiers": 4, + "d_id": 81705, + "documentation": { + "docstring": "Returns the first ancestor XLAContext of `ctxt`.\n\n Returns `ctxt` if `ctxt` is a XLAContext, or None if `ctxt` is not in a\n while loop.\n\n Args:\n ctxt: ControlFlowContext\n\n Returns:\n `ctxt` if `ctxt` is a XLAContext, the most nested XLAContext containing\n `ctxt`, or None if `ctxt` is not in a while loop.\n ", + "n_words": 50, + "vocab_size": 26, + "n_whitespaces": 80, + "language": "en" + } + }, + { + "id": 100921, + "commit_id": "f2e6f24651f62b28ccfb412180baca0aa7baf96a", + "repo": "faceswap", + "path": "lib/vgg_face.py", + "file_name": "vgg_face.py", + "fun_name": "get_model", + "commit_message": "Centralize model storage", + "code": "def get_model(self, git_model_id, model_filename, backend):\n \n model = GetModel(model_filename, git_model_id).model_path\n model = cv2.dnn.readNetFromCaffe(model[1], model[0])\n model.setPreferableTarget(self.get_backend(backend))\n return model\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 51, + "n_words": 16, + "vocab_size": 13, + "complexity": 1, + "nloc": 5, + "token_counts": 53, + "n_ast_nodes": 81, + "n_identifiers": 13, + "d_id": 20368, + "documentation": { + "docstring": " Check if model is available, if not, download and unzip it ", + "n_words": 11, + "vocab_size": 10, + "n_whitespaces": 12, + "language": "en" + } + }, + { + "id": 219662, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/_pydecimal.py", + "file_name": "_pydecimal.py", + "fun_name": "copy_sign", + "commit_message": "add python 3.10.4 for windows", + "code": "def copy_sign(self, a, b):\n \n a = _convert_other(a, raiseit=True)\n return a.copy_sign(b)\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 31, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 3, + "token_counts": 27, + "n_ast_nodes": 43, + "n_identifiers": 6, + "d_id": 55690, + "documentation": { + "docstring": "Copies the second operand's sign to the first one.\n\n In detail, it returns a copy of the first operand with the sign\n equal to the sign of the second operand.\n\n >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))\n Decimal('1.50')\n >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))\n Decimal('1.50')\n >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))\n Decimal('-1.50')\n >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))\n Decimal('-1.50')\n >>> ExtendedContext.copy_sign(1, -2)\n Decimal('-1')\n >>> ExtendedContext.copy_sign(Decimal(1), -2)\n Decimal('-1')\n >>> ExtendedContext.copy_sign(1, Decimal(-2))\n Decimal('-1')\n ", + "n_words": 60, + "vocab_size": 32, + "n_whitespaces": 179, + "language": "en" + } + }, + { + "id": 244039, + "commit_id": "cac356380d505bf15587f07c0529218cc36b9652", + "repo": "mmdetection", + "path": "mmdet/models/dense_heads/maskformer_head.py", + "file_name": "maskformer_head.py", + "fun_name": "forward", + "commit_message": "[Feature] Add Maskformer 
to mmdet (#7212)\n\n* first commit\r\n\r\n* add README\r\n\r\n* move model description from config to readme\r\n\r\nadd description for binary_input\r\n\r\nadd description for dice loss\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nremove compatibility of pretrain in maskformer\r\n\r\n* update comments in maskformer_head\r\n\r\n* update docs format", + "code": "def forward(self, feats, img_metas):\n \n batch_size = len(img_metas)\n input_img_h, input_img_w = img_metas[0]['batch_input_shape']\n padding_mask = feats[-1].new_ones(\n (batch_size, input_img_h, input_img_w), dtype=torch.float32)\n for i in range(batch_size):\n img_h, img_w, _ = img_metas[i]['img_shape']\n padding_mask[i, :img_h, :img_w] = 0\n padding_mask = F.interpolate(\n padding_mask.unsqueeze(1),\n size=feats[-1].shape[-2:],\n mode='nearest').to(torch.bool).squeeze(1)\n # when backbone is swin, memory is output of last stage of swin.\n # when backbone is r50, memory is output of tranformer encoder.\n mask_features, memory = self.pixel_decoder(feats, img_metas)\n pos_embed = self.decoder_pe(padding_mask)\n memory = self.decoder_input_proj(memory)\n # shape (batch_size, c, h, w) -> (h*w, batch_size, c)\n memory = memory.flatten(2).permute(2, 0, 1)\n pos_embed = pos_embed.flatten(2).permute(2, 0, 1)\n # shape (batch_size, h * w)\n padding_mask = padding_mask.flatten(1)\n # shape = (num_queries, embed_dims)\n query_embed = self.query_embed.weight\n # shape = (num_queries, batch_size, embed_dims)\n query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1)\n target = torch.zeros_like(query_embed)\n # shape (num_decoder, num_queries, batch_size, embed_dims)\n out_dec = self.transformer_decoder(\n query=target,\n key=memory,\n value=memory,\n key_pos=pos_embed,\n query_pos=query_embed,\n key_padding_mask=padding_mask)\n # shape (num_decoder, batch_size, num_queries, embed_dims)\n out_dec = out_dec.transpose(1, 2)\n\n # cls_scores\n all_cls_scores = self.cls_embed(out_dec)\n\n # mask_preds\n mask_embed = self.mask_embed(out_dec)\n all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed,\n mask_features)\n\n return all_cls_scores, all_mask_preds\n", + "url": "https://github.com/open-mmlab/mmdetection.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 553, + "n_words": 167, + "vocab_size": 97, + "complexity": 2, + "nloc": 34, + "token_counts": 302, + "n_ast_nodes": 475, + "n_identifiers": 54, + "d_id": 70209, + "documentation": { + "docstring": "Forward function.\n\n Args:\n feats (list[Tensor]): Features from the upstream network, each\n is a 4D-tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n all_cls_scores (Tensor): Classification scores for each\\\n scale level. Each is a 4D-tensor with shape\\\n (num_decoder, batch_size, num_queries, cls_out_channels).\\\n Note `cls_out_channels` should includes background.\n all_mask_preds (Tensor): Mask scores for each decoder\\\n layer. 
Each with shape (num_decoder, batch_size,\\\n num_queries, h, w).\n ", + "n_words": 60, + "vocab_size": 50, + "n_whitespaces": 215, + "language": "en" + } + }, + { + "id": 321379, + "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", + "repo": "qutebrowser", + "path": "tests/unit/keyinput/test_keyutils.py", + "file_name": "test_keyutils.py", + "fun_name": "test_text", + "commit_message": "Run scripts/dev/rewrite_enums.py", + "code": "def test_text(self, qt_key, upper):\n \n modifiers = Qt.KeyboardModifier.ShiftModifier if upper else Qt.KeyboardModifiers()\n info = keyutils.KeyInfo(qt_key.member, modifiers=modifiers)\n expected = qt_key.uppertext if upper else qt_key.text\n assert info.text() == expected\n", + "url": "https://github.com/qutebrowser/qutebrowser.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 61, + "n_words": 26, + "vocab_size": 20, + "complexity": 3, + "nloc": 5, + "token_counts": 58, + "n_ast_nodes": 90, + "n_identifiers": 16, + "d_id": 117685, + "documentation": { + "docstring": "Test KeyInfo.text() with all possible keys.\n\n See key_data.py for inputs and expected values.\n ", + "n_words": 13, + "vocab_size": 13, + "n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 215977, + "commit_id": "f2a783643de61cac1ff3288b40241e5ce6e1ddc8", + "repo": "salt", + "path": "salt/states/linux_acl.py", + "file_name": "linux_acl.py", + "fun_name": "present", + "commit_message": "Update to latest ``pyupgrade`` hook. Stop skipping it on CI.\n\nSigned-off-by: Pedro Algarvio ", + "code": "def present(name, acl_type, acl_name=\"\", perms=\"\", recurse=False, force=False):\n \n ret = {\"name\": name, \"result\": True, \"changes\": {}, \"comment\": \"\"}\n\n _octal = {\"r\": 4, \"w\": 2, \"x\": 1, \"-\": 0}\n _octal_lookup = {0: \"-\", 1: \"r\", 2: \"w\", 4: \"x\"}\n\n if not os.path.exists(name):\n ret[\"comment\"] = \"{} does not exist\".format(name)\n ret[\"result\"] = False\n return ret\n\n __current_perms = __salt__[\"acl.getfacl\"](name, recursive=recurse)\n\n if acl_type.startswith((\"d:\", \"default:\")):\n _acl_type = \":\".join(acl_type.split(\":\")[1:])\n _current_perms = __current_perms[name].get(\"defaults\", {})\n _default = True\n else:\n _acl_type = acl_type\n _current_perms = __current_perms[name]\n _default = False\n\n # The getfacl execution module lists default with empty names as being\n # applied to the user/group that owns the file, e.g.,\n # default:group::rwx would be listed as default:group:root:rwx\n # In this case, if acl_name is empty, we really want to search for root\n # but still uses '' for other\n\n # We search through the dictionary getfacl returns for the owner of the\n # file if acl_name is empty.\n if acl_name == \"\":\n _search_name = __current_perms[name].get(\"comment\").get(_acl_type, \"\")\n else:\n _search_name = acl_name\n\n if _current_perms.get(_acl_type, None) or _default:\n try:\n user = [\n i\n for i in _current_perms[_acl_type]\n if next(iter(i.keys())) == _search_name\n ].pop()\n except (AttributeError, IndexError, StopIteration, KeyError):\n user = None\n\n if user:\n octal_sum = sum(_octal.get(i, i) for i in perms)\n need_refresh = False\n # If recursive check all paths retrieved via acl.getfacl\n if recurse:\n for path in __current_perms:\n acl_found = False\n if _default:\n # Recusive default acls only apply to directories\n if not os.path.isdir(path):\n continue\n _current_perms_path = __current_perms[path].get(\"defaults\", {})\n else:\n _current_perms_path = 
__current_perms[path]\n for user_acl in _current_perms_path.get(_acl_type, []):\n if (\n _search_name in user_acl\n and user_acl[_search_name][\"octal\"] == octal_sum\n ):\n acl_found = True\n if not acl_found:\n need_refresh = True\n break\n\n # Check the permissions from the already located file\n elif user[_search_name][\"octal\"] == sum(_octal.get(i, i) for i in perms):\n need_refresh = False\n # If they don't match then refresh\n else:\n need_refresh = True\n\n if not need_refresh:\n ret[\"comment\"] = \"Permissions are in the desired state\"\n else:\n _num = user[_search_name][\"octal\"]\n new_perms = \"{}{}{}\".format(\n _octal_lookup[_num & 1],\n _octal_lookup[_num & 2],\n _octal_lookup[_num & 4],\n )\n changes = {\n \"new\": {\"acl_name\": acl_name, \"acl_type\": acl_type, \"perms\": perms},\n \"old\": {\n \"acl_name\": acl_name,\n \"acl_type\": acl_type,\n \"perms\": new_perms,\n },\n }\n\n if __opts__[\"test\"]:\n ret.update(\n {\n \"comment\": (\n \"Updated permissions will be applied for \"\n \"{}: {} -> {}\".format(acl_name, new_perms, perms)\n ),\n \"result\": None,\n \"changes\": changes,\n }\n )\n return ret\n try:\n if force:\n __salt__[\"acl.wipefacls\"](\n name, recursive=recurse, raise_err=True\n )\n\n __salt__[\"acl.modfacl\"](\n acl_type,\n acl_name,\n perms,\n name,\n recursive=recurse,\n raise_err=True,\n )\n ret.update(\n {\n \"comment\": \"Updated permissions for {}\".format(acl_name),\n \"result\": True,\n \"changes\": changes,\n }\n )\n except CommandExecutionError as exc:\n ret.update(\n {\n \"comment\": \"Error updating permissions for {}: {}\".format(\n acl_name, exc.strerror\n ),\n \"result\": False,\n }\n )\n else:\n changes = {\n \"new\": {\"acl_name\": acl_name, \"acl_type\": acl_type, \"perms\": perms}\n }\n\n if __opts__[\"test\"]:\n ret.update(\n {\n \"comment\": \"New permissions will be applied for {}: {}\".format(\n acl_name, perms\n ),\n \"result\": None,\n \"changes\": changes,\n }\n )\n ret[\"result\"] = None\n return ret\n\n try:\n if force:\n __salt__[\"acl.wipefacls\"](name, recursive=recurse, raise_err=True)\n\n __salt__[\"acl.modfacl\"](\n acl_type, acl_name, perms, name, recursive=recurse, raise_err=True\n )\n ret.update(\n {\n \"comment\": \"Applied new permissions for {}\".format(acl_name),\n \"result\": True,\n \"changes\": changes,\n }\n )\n except CommandExecutionError as exc:\n ret.update(\n {\n \"comment\": \"Error updating permissions for {}: {}\".format(\n acl_name, exc.strerror\n ),\n \"result\": False,\n }\n )\n\n else:\n ret[\"comment\"] = \"ACL Type does not exist\"\n ret[\"result\"] = False\n\n return ret\n\n", + "url": "https://github.com/saltstack/salt.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 23, + "n_whitespaces": 3212, + "n_words": 522, + "vocab_size": 248, + "complexity": 28, + "nloc": 155, + "token_counts": 807, + "n_ast_nodes": 1365, + "n_identifiers": 51, + "d_id": 54297, + "documentation": { + "docstring": "\n Ensure a Linux ACL is present\n\n name\n The acl path\n\n acl_type\n The type of the acl is used for it can be 'user' or 'group'\n\n acl_name\n The user or group\n\n perms\n Set the permissions eg.: rwx\n\n recurse\n Set the permissions recursive in the path\n\n force\n Wipe out old permissions and ensure only the new permissions are set\n ", + "n_words": 57, + "vocab_size": 43, + "n_whitespaces": 125, + "language": "en" + } + }, + { + "id": 92391, + "commit_id": "c4cc0467974bcfb2b3c95120bd19c337aa977183", + "repo": "sentry", + "path": 
"tests/sentry/sentry_metrics/test_postgres_indexer.py", + "file_name": "test_postgres_indexer.py", + "fun_name": "test_rate_limited", + "commit_message": "feat(metrics_indexer): Add rate limits functionality to indexer [INGEST-1380] (#36263)\n\n* feat(metrics_indexer): Add rate limits functionality to indexer [INGEST-1380]\r\n\r\nThe postgres string indexer now is able to rate limit writes using four\r\nsentry options. If that happens, `None` is returned in place of an\r\ninteger, and the FetchType is RATE_LIMITED.\r\n\r\nThe kafka consumer/message processor explicitly checks for those `None`\r\nvalues and throws away every message that references a rate-limited\r\nstring. It logs a Sentry error for every dropped message just because\r\nthat's already what we do for other kinds of dropped messages.\r\n\r\nRate limiting and quota management currently creates a ton of\r\ndataclasses and that probably wastes time. There are a ton of\r\nlow-hanging fruits:\r\n\r\n* the return value of _construct_quotas could be globally cached, as\r\n long as the cache is wiped when the sentry options change.\r\n\r\n* the same Quota object (for global limits) is referenced from multiple\r\n RequestedQuota instances (one for each org).\r\n `sentry.ratelimits.sliding_windows` could check the `id()` of the\r\n quota (if there is no prefix override) to avoid computing and checking\r\n the same quota multiple times.\r\n\r\nAn even lower hanging fruit is that we're fetching the same keys from\r\nRedis multiple times, because multiple organizations (and therefore\r\nmultiple RequestedQuota instances) adhere to the global quota. So that's\r\nbeen fixed, but as for the rest let's wait for timings from prod.\r\n\r\n* fix typo\r\n\r\n* fix typing\r\n\r\n* apply review feedback\r\n\r\n* fix typing, add test\r\n\r\n* fix tests\r\n\r\n* apply review feedback about logging too many msgs\r\n\r\n* fix leaking option in test\r\n\r\n* sike, more test failures", + "code": "def test_rate_limited(self):\n \n org_strings = {1: {\"a\", \"b\", \"c\"}, 2: {\"e\", \"f\"}, 3: {\"g\"}}\n\n with override_options(\n {\n \"sentry-metrics.writes-limiter.limits.releasehealth.per-org\": [\n {\"window_seconds\": 10, \"granularity_seconds\": 10, \"limit\": 1}\n ],\n }\n ):\n results = self.indexer.bulk_record(\n use_case_id=self.use_case_id, org_strings=org_strings\n )\n\n assert len(results[1]) == 3\n assert len(results[2]) == 2\n assert len(results[3]) == 1\n assert results[3][\"g\"] is not None\n\n rate_limited_strings = set()\n\n for org_id in 1, 2, 3:\n for k, v in results[org_id].items():\n if v is None:\n rate_limited_strings.add(k)\n\n assert len(rate_limited_strings) == 3\n assert \"g\" not in rate_limited_strings\n\n for string in rate_limited_strings:\n assert results.get_fetch_metadata()[string] == (\n None,\n FetchType.RATE_LIMITED,\n FetchTypeExt(is_global=False),\n )\n\n org_strings = {1: rate_limited_strings}\n\n with override_options(\n {\n \"sentry-metrics.writes-limiter.limits.releasehealth.global\": [\n {\"window_seconds\": 10, \"granularity_seconds\": 10, \"limit\": 2}\n ],\n }\n ):\n results = self.indexer.bulk_record(\n use_case_id=self.use_case_id, org_strings=org_strings\n )\n\n rate_limited_strings2 = set()\n for k, v in results[1].items():\n if v is None:\n rate_limited_strings2.add(k)\n\n assert len(rate_limited_strings2) == 1\n assert len(rate_limited_strings - rate_limited_strings2) == 2\n\n", + "url": "https://github.com/getsentry/sentry.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + 
"ast_levels": 15, + "n_whitespaces": 631, + "n_words": 137, + "vocab_size": 75, + "complexity": 7, + "nloc": 46, + "token_counts": 294, + "n_ast_nodes": 476, + "n_identifiers": 23, + "d_id": 18908, + "documentation": { + "docstring": "\n Assert that rate limits per-org and globally are applied at all.\n\n Since we don't have control over ordering in sets/dicts, we have no\n control over which string gets rate-limited. That makes assertions\n quite awkward and imprecise.\n ", + "n_words": 36, + "vocab_size": 31, + "n_whitespaces": 72, + "language": "en" + } + }, + { + "id": 34808, + "commit_id": "44b21f117bcf71e3d88a11c3523c94b27949fdbf", + "repo": "transformers", + "path": "src/transformers/modeling_flax_utils.py", + "file_name": "modeling_flax_utils.py", + "fun_name": "register_for_auto_class", + "commit_message": "Save code of registered custom models (#15379)\n\n* Allow dynamic modules to use relative imports\r\n\r\n* Work for configs\r\n\r\n* Fix last merge conflict\r\n\r\n* Save code of registered custom objects\r\n\r\n* Map strings to strings\r\n\r\n* Fix test\r\n\r\n* Add tokenizer\r\n\r\n* Rework tests\r\n\r\n* Tests\r\n\r\n* Ignore fixtures py files for tests\r\n\r\n* Tokenizer test + fix collection\r\n\r\n* With full path\r\n\r\n* Rework integration\r\n\r\n* Fix typo\r\n\r\n* Remove changes in conftest\r\n\r\n* Test for tokenizers\r\n\r\n* Add documentation\r\n\r\n* Update docs/source/custom_models.mdx\r\n\r\nCo-authored-by: Lysandre Debut \r\n\r\n* Add file structure and file content\r\n\r\n* Add more doc\r\n\r\n* Style\r\n\r\n* Update docs/source/custom_models.mdx\r\n\r\nCo-authored-by: Suraj Patil \r\n\r\n* Address review comments\r\n\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Suraj Patil ", + "code": "def register_for_auto_class(cls, auto_class=\"FlaxAutoModel\"):\n \n if not isinstance(auto_class, str):\n auto_class = auto_class.__name__\n\n import transformers.models.auto as auto_module\n\n if not hasattr(auto_module, auto_class):\n raise ValueError(f\"{auto_class} is not a valid auto class.\")\n\n cls._auto_class = auto_class\n\n\n# To update the docstring, we need to copy the method, otherwise we change the original docstring.\nFlaxPreTrainedModel.push_to_hub = copy_func(FlaxPreTrainedModel.push_to_hub)\nFlaxPreTrainedModel.push_to_hub.__doc__ = FlaxPreTrainedModel.push_to_hub.__doc__.format(\n object=\"model\", object_class=\"FlaxAutoModel\", object_files=\"model checkpoint\"\n)\n\n", + "url": "https://github.com/huggingface/transformers.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 113, + "n_words": 57, + "vocab_size": 47, + "complexity": 3, + "nloc": 7, + "token_counts": 52, + "n_ast_nodes": 150, + "n_identifiers": 21, + "d_id": 6342, + "documentation": { + "docstring": "\n Register this class with a given auto class. 
This should only be used for custom models as the ones in the\n library are already mapped with an auto class.\n\n Args:\n auto_class (`str` or `type`, *optional*, defaults to `\"FlaxAutoModel\"`):\n The auto class to register this new model with.\n ", + "n_words": 47, + "vocab_size": 39, + "n_whitespaces": 102, + "language": "en" + } + }, + { + "id": 70498, + "commit_id": "4248d406c011d6ba6207bb0e0e9b885813d961be", + "repo": "wagtail", + "path": "wagtail/search/backends/database/__init__.py", + "file_name": "__init__.py", + "fun_name": "SearchBackend", + "commit_message": "Test for presence of fts5 extension in sqlite backend initialisation and migration", + "code": "def SearchBackend(params):\n \n if connection.vendor == 'postgresql':\n from .postgres.postgres import PostgresSearchBackend\n return PostgresSearchBackend(params)\n elif connection.vendor == 'mysql':\n from .mysql.mysql import MySQLSearchBackend\n return MySQLSearchBackend(params)\n elif connection.vendor == 'sqlite':\n from .sqlite.utils import fts5_available\n if fts5_available():\n from .sqlite.sqlite import SQLiteSearchBackend\n return SQLiteSearchBackend(params)\n else:\n from .fallback import DatabaseSearchBackend\n return DatabaseSearchBackend(params)\n else:\n from .fallback import DatabaseSearchBackend\n return DatabaseSearchBackend(params)\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 174, + "n_words": 52, + "vocab_size": 28, + "complexity": 5, + "nloc": 18, + "token_counts": 99, + "n_ast_nodes": 177, + "n_identifiers": 14, + "d_id": 15513, + "documentation": { + "docstring": "\n Returns the appropriate search backend for the current 'default' database system\n ", + "n_words": 11, + "vocab_size": 10, + "n_whitespaces": 18, + "language": "en" + } + }, + { + "id": 258535, + "commit_id": "330881a21ca48c543cc8a67aa0d4e4c1dc1001ab", + "repo": "scikit-learn", + "path": "sklearn/neighbors/tests/test_nca.py", + "file_name": "test_nca.py", + "fun_name": "test_nca_feature_names_out", + "commit_message": "ENH Adds get_feature_names_out to neighbors module (#22212)\n\nCo-authored-by: Olivier Grisel ", + "code": "def test_nca_feature_names_out():\n \n\n X = iris_data\n y = iris_target\n\n est = NeighborhoodComponentsAnalysis().fit(X, y)\n names_out = est.get_feature_names_out()\n\n class_name_lower = est.__class__.__name__.lower()\n expected_names_out = np.array(\n [f\"{class_name_lower}{i}\" for i in range(est.components_.shape[1])],\n dtype=object,\n )\n assert_array_equal(names_out, expected_names_out)\n", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 71, + "n_words": 30, + "vocab_size": 25, + "complexity": 2, + "nloc": 11, + "token_counts": 77, + "n_ast_nodes": 132, + "n_identifiers": 24, + "d_id": 75280, + "documentation": { + "docstring": "Check `get_feature_names_out` for `NeighborhoodComponentsAnalysis`.", + "n_words": 4, + "vocab_size": 4, + "n_whitespaces": 3, + "language": "en" + } + }, + { + "id": 257593, + "commit_id": "632cd1c141a8b485c6ef8695685d2d8eef3ca50f", + "repo": "haystack", + "path": "rest_api/test/test_rest_api.py", + "file_name": "test_rest_api.py", + "fun_name": "test_query_with_bool_in_params", + "commit_message": "Allow values that are not dictionaries in the request params in the `/search` endpoint (#2720)\n\n* let params contain something else than dictionaries\r\n\r\n* 
rewrite the test same style as the main branch", + "code": "def test_query_with_bool_in_params(client):\n \n with mock.patch(\"rest_api.controller.search.query_pipeline\") as mocked_pipeline:\n # `run` must return a dictionary containing a `query` key\n mocked_pipeline.run.return_value = {\"query\": TEST_QUERY}\n request_body = {\n \"query\": TEST_QUERY,\n \"params\": {\"debug\": True, \"Retriever\": {\"top_k\": 5}, \"Reader\": {\"top_k\": 3}},\n }\n response = client.post(url=\"/query\", json=request_body)\n assert 200 == response.status_code\n response_json = response.json()\n assert response_json[\"documents\"] == []\n assert response_json[\"answers\"] == []\n\n", + "url": "https://github.com/deepset-ai/haystack.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 145, + "n_words": 54, + "vocab_size": 44, + "complexity": 1, + "nloc": 12, + "token_counts": 102, + "n_ast_nodes": 185, + "n_identifiers": 15, + "d_id": 75098, + "documentation": { + "docstring": "\n Ensure items of params can be other types than dictionary, see\n https://github.com/deepset-ai/haystack/issues/2656\n ", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 22, + "language": "en" + } + }, + { + "id": 100531, + "commit_id": "bdbbad4d310fb606b6f412aa81e9f57ccd994e97", + "repo": "faceswap", + "path": "lib/cli/launcher.py", + "file_name": "launcher.py", + "fun_name": "_setup_amd", + "commit_message": "Refactor lib.gpu_stats (#1218)\n\n* inital gpu_stats refactor\r\n\r\n* Add dummy CPU Backend\r\n\r\n* Update Sphinx documentation", + "code": "def _setup_amd(cls, arguments):\n \n logger.debug(\"Setting up for AMD\")\n try:\n import plaidml # noqa pylint:disable=unused-import,import-outside-toplevel\n except ImportError:\n logger.error(\"PlaidML not found. 
Run `pip install plaidml-keras` for AMD support\")\n return False\n from lib.gpu_stats import setup_plaidml # pylint:disable=import-outside-toplevel\n setup_plaidml(arguments.loglevel, arguments.exclude_gpus)\n logger.debug(\"setup up for PlaidML\")\n return True\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 132, + "n_words": 41, + "vocab_size": 35, + "complexity": 2, + "nloc": 11, + "token_counts": 53, + "n_ast_nodes": 96, + "n_identifiers": 13, + "d_id": 19995, + "documentation": { + "docstring": " Test for plaidml and perform setup for AMD.\n\n Parameters\n ----------\n arguments: :class:`argparse.Namespace`\n The command line arguments passed to Faceswap.\n ", + "n_words": 19, + "vocab_size": 18, + "n_whitespaces": 59, + "language": "en" + } + }, + { + "id": 176550, + "commit_id": "5a7985fc41bc0c686c035de43c66cf4fb5fcc94f", + "repo": "networkx", + "path": "networkx/algorithms/tree/recognition.py", + "file_name": "recognition.py", + "fun_name": "is_arborescence", + "commit_message": "Added examples in tournament and tree functions (#5536)\n\n* examples\r\n\r\n* examples\r\n\r\n* examples\r\n\r\n* Example changed\r\n\r\n* improved styling\r\n\r\n* revised\r\n\r\n* edge labels\r\n\r\n* improved styling\r\n\r\n* spacing\r\n\r\n* error testing\r\n\r\n* examples\r\n\r\n* styling\r\n\r\n* add_nodes removed\r\n\r\n* spaceing\r\n\r\n* spacing\r\n\r\n* spacing\r\n\r\n* added examples\r\n\r\n* removed random_tournament example\r\n\r\n* added examples in branching and aborescence\r\n\r\n* error removed", + "code": "def is_arborescence(G):\n \n return is_tree(G) and max(d for n, d in G.in_degree()) <= 1\n\n\n@nx.utils.not_implemented_for(\"undirected\")", + "url": "https://github.com/networkx/networkx.git", + "language": "Python", + "ast_errors": "@nx.utils.not_implemented_for(\"undirected\")", + "n_ast_errors": 1, + "ast_levels": 12, + "n_whitespaces": 19, + "n_words": 14, + "vocab_size": 14, + "complexity": 3, + "nloc": 2, + "token_counts": 28, + "n_ast_nodes": 64, + "n_identifiers": 10, + "d_id": 41959, + "documentation": { + "docstring": "\n Returns True if `G` is an arborescence.\n\n An arborescence is a directed tree with maximum in-degree equal to 1.\n\n Parameters\n ----------\n G : graph\n The graph to test.\n\n Returns\n -------\n b : bool\n A boolean that is True if `G` is an arborescence.\n\n Examples\n --------\n >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (3, 4)])\n >>> nx.is_arborescence(G)\n True\n >>> G.remove_edge(0, 1)\n >>> G.add_edge(1, 2) # maximum in-degree is 2\n >>> nx.is_arborescence(G)\n False\n\n Notes\n -----\n In another convention, an arborescence is known as a *tree*.\n\n See Also\n --------\n is_tree\n\n ", + "n_words": 89, + "vocab_size": 62, + "n_whitespaces": 177, + "language": "en" + } + }, + { + "id": 248167, + "commit_id": "116a4c8340b729ffde43be33df24d417384cb28b", + "repo": "synapse", + "path": "tests/rest/client/test_sync.py", + "file_name": "test_sync.py", + "fun_name": "test_public_receipt_can_override_private", + "commit_message": "Implement changes to MSC2285 (hidden read receipts) (#12168)\n\n* Changes hidden read receipts to be a separate receipt type\r\n (instead of a field on `m.read`).\r\n* Updates the `/receipts` endpoint to accept `m.fully_read`.", + "code": "def test_public_receipt_can_override_private(self) -> None:\n \n # Send a message as the first user\n res = self.helper.send(self.room_id, body=\"hello\", tok=self.tok)\n\n # 
Send a private read receipt\n channel = self.make_request(\n \"POST\",\n f\"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}\",\n {},\n access_token=self.tok2,\n )\n self.assertEqual(channel.code, 200)\n self.assertIsNone(self._get_read_receipt())\n\n # Send a public read receipt\n channel = self.make_request(\n \"POST\",\n f\"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}\",\n {},\n access_token=self.tok2,\n )\n self.assertEqual(channel.code, 200)\n\n # Test that we did override the private read receipt\n self.assertNotEqual(self._get_read_receipt(), None)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 248, + "n_words": 62, + "vocab_size": 39, + "complexity": 1, + "nloc": 22, + "token_counts": 114, + "n_ast_nodes": 232, + "n_identifiers": 20, + "d_id": 72135, + "documentation": { + "docstring": "\n Sending a public read receipt to the same event which has a private read\n receipt should cause that receipt to become public.\n ", + "n_words": 22, + "vocab_size": 17, + "n_whitespaces": 44, + "language": "en" + } + }, + { + "id": 108693, + "commit_id": "7bafb8be7c6e81180e9518a91d10da9422321a0c", + "repo": "matplotlib", + "path": "lib/matplotlib/backends/backend_pgf.py", + "file_name": "backend_pgf.py", + "fun_name": "_print_pgf_to_fh", + "commit_message": "Deprecate internal use of get/set dpi", + "code": "def _print_pgf_to_fh(self, fh, *, bbox_inches_restore=None):\n\n header_text = \n\n # append the preamble used by the backend as a comment for debugging\n header_info_preamble = [\"%% Matplotlib used the following preamble\"]\n for line in _get_preamble().splitlines():\n header_info_preamble.append(\"%% \" + line)\n header_info_preamble.append(\"%%\")\n header_info_preamble = \"\\n\".join(header_info_preamble)\n\n # get figure size in inch\n w, h = self.figure.get_figwidth(), self.figure.get_figheight()\n dpi = self.figure.dpi\n\n # create pgfpicture environment and write the pgf code\n fh.write(header_text)\n fh.write(header_info_preamble)\n fh.write(\"\\n\")\n _writeln(fh, r\"\\begingroup\")\n _writeln(fh, r\"\\makeatletter\")\n _writeln(fh, r\"\\begin{pgfpicture}\")\n _writeln(fh,\n r\"\\pgfpathrectangle{\\pgfpointorigin}{\\pgfqpoint{%fin}{%fin}}\"\n % (w, h))\n _writeln(fh, r\"\\pgfusepath{use as bounding box, clip}\")\n renderer = MixedModeRenderer(self.figure, w, h, dpi,\n RendererPgf(self.figure, fh),\n bbox_inches_restore=bbox_inches_restore)\n self.figure.draw(renderer)\n\n # end the pgfpicture environment\n _writeln(fh, r\"\\end{pgfpicture}\")\n _writeln(fh, r\"\\makeatother\")\n _writeln(fh, r\"\\endgroup\")\n", + "url": "https://github.com/matplotlib/matplotlib.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 389, + "n_words": 104, + "vocab_size": 77, + "complexity": 2, + "nloc": 46, + "token_counts": 195, + "n_ast_nodes": 327, + "n_identifiers": 23, + "d_id": 23310, + "documentation": { + "docstring": "%% Creator: Matplotlib, PGF backend\n%%\n%% To include the figure in your LaTeX document, write\n%% \\\\input{.pgf}\n%%\n%% Make sure the required packages are loaded in your preamble\n%% \\\\usepackage{pgf}\n%%\n%% Also ensure that all the required font packages are loaded; for instance,\n%% the lmodern package is sometimes necessary when using math font.\n%% \\\\usepackage{lmodern}\n%%\n%% Figures using additional raster images can only be included by 
\\\\input if\n%% they are in the same directory as the main LaTeX file. For loading figures\n%% from other directories you can use the `import` package\n%% \\\\usepackage{import}\n%%\n%% and then include the figures with\n%% \\\\import{}{.pgf}\n%%\n", + "n_words": 113, + "vocab_size": 74, + "n_whitespaces": 103, + "language": "en" + } + }, + { + "id": 130580, + "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", + "repo": "ray", + "path": "python/ray/data/dataset.py", + "file_name": "dataset.py", + "fun_name": "to_pandas_refs", + "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", + "code": "def to_pandas_refs(self) -> List[ObjectRef[\"pandas.DataFrame\"]]:\n \n\n block_to_df = cached_remote_fn(_block_to_df)\n return [block_to_df.remote(block) for block in self._blocks.get_blocks()]\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 34, + "n_words": 13, + "vocab_size": 13, + "complexity": 2, + "nloc": 15, + "token_counts": 39, + "n_ast_nodes": 65, + "n_identifiers": 11, + "d_id": 29316, + "documentation": { + "docstring": "Convert this dataset into a distributed set of Pandas dataframes.\n\n This is only supported for datasets convertible to Arrow records.\n This function induces a copy of the data. For zero-copy access to the\n underlying data, consider using ``.to_arrow()`` or\n ``.get_internal_block_refs()``.\n\n Time complexity: O(dataset size / parallelism)\n\n Returns:\n A list of remote Pandas dataframes created from this dataset.\n ", + "n_words": 57, + "vocab_size": 49, + "n_whitespaces": 117, + "language": "en" + } + }, + { + "id": 275252, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/optimizers/optimizer_experimental/adamw.py", + "file_name": "adamw.py", + "fun_name": "build", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def build(self, var_list, exclude_from_weight_decay=None):\n \n super().build(var_list)\n if hasattr(self, \"_built\") and self._built:\n return\n self._built = True\n if not hasattr(self, \"_exclude_from_weight_decay\"):\n self._exclude_from_weight_decay = exclude_from_weight_decay or []\n self._momentums = []\n self._velocities = []\n for var in var_list:\n self._momentums.append(\n self.add_variable_from_reference(\n model_variable=var, variable_name=\"m\"\n )\n )\n self._velocities.append(\n self.add_variable_from_reference(\n model_variable=var, variable_name=\"v\"\n )\n )\n if self.amsgrad:\n self._velocity_hats = []\n for var in var_list:\n self._velocity_hats.append(\n self.add_variable_from_reference(\n model_variable=var, variable_name=\"vhat\"\n )\n )\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 400, + "n_words": 60, + "vocab_size": 37, + "complexity": 8, + "nloc": 28, + "token_counts": 145, + "n_ast_nodes": 238, + "n_identifiers": 17, + "d_id": 81348, + "documentation": { + "docstring": "Initialize optimizer variables.\n\n AdamW optimizer has 3 types of variables: momentums, velocities and\n velocity_hat (only set when amsgrad is applied),\n\n Args:\n var_list: list of model variables to build AdamW variables on.\n exclude_from_weight_decay: list of model variables that will be excluded\n from weight decay.\n ", + "n_words": 43, + 
"vocab_size": 35, + "n_whitespaces": 100, + "language": "en" + } + }, + { + "id": 312993, + "commit_id": "243d003acc11d638feb3867410c3cbb1987520bc", + "repo": "core", + "path": "homeassistant/components/moehlenhoff_alpha2/config_flow.py", + "file_name": "config_flow.py", + "fun_name": "validate_input", + "commit_message": "Add Moehlenhoff Alpha2 underfloor heating system integration (#42771)\n\n* Add Moehlenhoff Alpha2 underfloor heating system integration\r\n\r\n* isort changes\r\n\r\n* flake8 changes\r\n\r\n* Do not exclude config_flow.py\r\n\r\n* pylint changes\r\n\r\n* Add config_flow test\r\n\r\n* correct requirements_test_all.txt\r\n\r\n* more tests\r\n\r\n* Update test description\r\n\r\n* Test connection and catch TimeoutError in async_setup_entry\r\n\r\n* Add version to manifest file\r\n\r\n* Remove version from manifest file\r\n\r\n* Replace tests.async_mock.patch by unittest.mock.patch\r\n\r\n* Update moehlenhoff-alpha2 to version 1.0.1\r\n\r\n* Update requirements for moehlenhoff-alpha2 1.0.1\r\n\r\n* Update moehlenhoff-alpha2 to 1.0.2\r\n\r\n* Use async_setup_platforms\r\n\r\n* Use async_unload_platforms\r\n\r\n* Separate connection and devices for each entry_id\r\n\r\n* Use async_track_time_interval to schedule updates\r\n\r\n* Check if input is valid before checking uniqueness\r\n\r\n* Move Exception handling to validate_input\r\n\r\n* Catch aiohttp.client_exceptions.ClientConnectorError\r\n\r\n* Remove translation files\r\n\r\n* Mock TimeoutError\r\n\r\n* Fix data update\r\n\r\n* Replace current callback implementation with ha dispatcher\r\n\r\n* Return False in should_poll\r\n\r\n* Remove unused argument\r\n\r\n* Remove CONNECTION_CLASS\r\n\r\n* Use _async_current_entries\r\n\r\n* Call async_schedule_update_ha_state after data update\r\n\r\n* Remove unneeded async_setup\r\n\r\nCo-authored-by: Milan Meulemans \r\n\r\n* Remove unneeded async_setup_platform\r\n\r\nCo-authored-by: Milan Meulemans \r\n\r\n* Set Schema attribute host required\r\n\r\nCo-authored-by: Milan Meulemans \r\n\r\n* Remove unused Exception class\r\n\r\nCo-authored-by: Milan Meulemans \r\n\r\n* Update manifest.json\r\n\r\nCo-authored-by: Milan Meulemans \r\n\r\n* pylint constructor return type None\r\n\r\n* Replace properties by class variables\r\n\r\n* use pass instead of return\r\n\r\n* Remove unused sync update method\r\n\r\n* remove property hvac_action\r\n\r\n* remove pass\r\n\r\n* rework exception handling\r\n\r\n* Update homeassistant/components/moehlenhoff_alpha2/config_flow.py\r\n\r\nCo-authored-by: Milan Meulemans \r\n\r\n* Correct indentation\r\n\r\n* catch Exception in validate_input\r\n\r\n* Replace HomeAssistantType with HomeAssistant\r\n\r\n* Update to moehlenhoff-alpha2 1.0.3\r\n\r\n* Allow to switch between heating and cooling mode\r\n\r\n* Update moehlenhoff-alpha2 to version 1.0.4\r\n\r\n* Update heatarea data after setting target temperature\r\n\r\n* Support hvac_action\r\n\r\n* Fix heatarea update with multiple bases\r\n\r\n* Update data after setting preset mode\r\n\r\n* Use custom preset modes like defined by device\r\n\r\n* Fix config flow test\r\n\r\n* Fix test_duplicate_error\r\n\r\n* Rename property to extra_state_attributes\r\n\r\nRename property device_state_attributes to extra_state_attributes and\r\nreturn lowercase keys in dict.\r\n\r\n* Refactor using DataUpdateCoordinator\r\n\r\n* Remove _attr_should_poll\r\n\r\n* Raise HomeAssistantError on communication error\r\n\r\nCatch HTTPError instead of broad except and reraise as HomeAssistantError\r\n\r\n* Change 
DataUpdateCoordinator name to alpha2_base\r\n\r\n* Refresh coordinator before setting data\r\n\r\n* Raise ValueError on invalid heat area mode\r\n\r\n* Rename heatarea to heat_area\r\n\r\n* Set type annotation in class attribute\r\n\r\n* Move coordinator to top\r\n\r\n* Move exception handling to the coordinator\r\n\r\n* Use heat_area_id directly\r\n\r\n* Sore get_cooling() result into local var\r\n\r\n* Add explanation of status attributes\r\n\r\nand remove BLOCK_HC\r\n\r\n* Fix pylint warnings\r\n\r\n* from __future__ import annotations\r\n\r\n* Use Platform Enum\r\n\r\n* Move data handling to coordinator\r\n\r\n* Remove property extra_state_attributes\r\n\r\n* Add missing annotations\r\n\r\n* Update moehlenhoff-alpha2 to version 1.1.2\r\n\r\n* Rework tests based on the scaffold template\r\n\r\n* Set also heat/cool/day/night temp with target temp\r\n\r\n* Remove unneeded code from tests\r\n\r\nCo-authored-by: Milan Meulemans ", + "code": "async def validate_input(data):\n \n\n base = Alpha2Base(data[\"host\"])\n try:\n await base.update_data()\n except (aiohttp.client_exceptions.ClientConnectorError, asyncio.TimeoutError):\n return {\"error\": \"cannot_connect\"}\n except Exception: # pylint: disable=broad-except\n _LOGGER.exception(\"Unexpected exception\")\n return {\"error\": \"unknown\"}\n\n # Return info that you want to store in the config entry.\n return {\"title\": base.name}\n\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 90, + "n_words": 40, + "vocab_size": 35, + "complexity": 3, + "nloc": 10, + "token_counts": 65, + "n_ast_nodes": 123, + "n_identifiers": 14, + "d_id": 111624, + "documentation": { + "docstring": "Validate the user input allows us to connect.\n\n Data has the keys from DATA_SCHEMA with values provided by the user.\n ", + "n_words": 20, + "vocab_size": 18, + "n_whitespaces": 26, + "language": "en" + } + }, + { + "id": 101682, + "commit_id": "e5356a417e7c2124e75c4a2994ed604fc0a3cc74", + "repo": "faceswap", + "path": "lib/align/alignments.py", + "file_name": "alignments.py", + "fun_name": "_update", + "commit_message": "Alignments update:\n - Store face embeddings in PNG header when sorting\n - typing + refactor\n - Update alignments keys for 'identity' and 'video_meta' + bump to v2.3\n - General typing fixes", + "code": "def _update(self) -> int:\n \n retval = self.update()\n logger.debug(\"Updated %s: %s\", self.__class__.__name__, retval)\n return retval\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 42, + "n_words": 14, + "vocab_size": 13, + "complexity": 1, + "nloc": 12, + "token_counts": 31, + "n_ast_nodes": 53, + "n_identifiers": 9, + "d_id": 21086, + "documentation": { + "docstring": " Calls the child's :func:`update` method, logs output and sets the\n :attr:`is_updated` flag\n\n Returns\n -------\n int\n The number of items that were updated\n ", + "n_words": 22, + "vocab_size": 21, + "n_whitespaces": 69, + "language": "en" + } + }, + { + "id": 176142, + "commit_id": "0dada08f4eedb104bfa40932b576e44d82218547", + "repo": "edgedb", + "path": "tests/test_edgeql_scope.py", + "file_name": "test_edgeql_scope.py", + "fun_name": "test_edgeql_scope_for_with_computable_01", + "commit_message": "Always include the definition context namespace in computable contexts (#3331)\n\nWe need to include the *original* source namespace in our 
ctx\r\nnamespace when compiling computables. The current mechanism of trying\r\nto look up in view_sets or failing that using the source namespace\r\nfrom the computable use, but this was failing to find it in some cases\r\nwith FOR.\r\n\r\nFix this by instead directly pulling in the namespace from qlctx. The\r\ninclusion of qlctx's namespace nicely allows us to ditch so later\r\nlogic as well.\r\n\r\nAdditionally we need to merge the namespace into *both* sides in\r\nget_view_map_remapping, to handle cases like referencing a `FOR`\r\nvariable where the current ns doesn't get merged in.\r\n\r\nFixes #3323.", + "code": "async def test_edgeql_scope_for_with_computable_01(self):\n await self.assert_query_result(\n r,\n tb.bag([\n {\"name\": \"Alice\", \"namelen\": 5},\n {\"name\": \"Bob\", \"namelen\": 3},\n {\"name\": \"Carol\", \"namelen\": 5},\n {\"name\": \"Dave\", \"namelen\": 4}\n ])\n )\n", + "url": "https://github.com/edgedb/edgedb.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 131, + "n_words": 25, + "vocab_size": 18, + "complexity": 1, + "nloc": 20, + "token_counts": 60, + "n_ast_nodes": 111, + "n_identifiers": 5, + "d_id": 41718, + "documentation": { + "docstring": "\n with props := (\n for h in User union (\n select h {namelen := len(h.name)}\n )\n )\n select props {\n name,\n namelen\n };\n ", + "n_words": 23, + "vocab_size": 17, + "n_whitespaces": 146, + "language": "en" + } + }, + { + "id": 261613, + "commit_id": "e2e7d75d1e81ce3b67257fcc4cce32ab2d2acd2f", + "repo": "scikit-learn", + "path": "sklearn/manifold/_isomap.py", + "file_name": "_isomap.py", + "fun_name": "transform", + "commit_message": "MAINT Extend dtype preserved common test to check transform (#24982)", + "code": "def transform(self, X):\n \n check_is_fitted(self)\n if self.n_neighbors is not None:\n distances, indices = self.nbrs_.kneighbors(X, return_distance=True)\n else:\n distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True)\n\n # Create the graph of shortest distances from X to\n # training data via the nearest neighbors of X.\n # This can be done as a single array operation, but it potentially\n # takes a lot of memory. To avoid that, use a loop:\n\n n_samples_fit = self.nbrs_.n_samples_fit_\n n_queries = distances.shape[0]\n\n if hasattr(X, \"dtype\") and X.dtype == np.float32:\n dtype = np.float32\n else:\n dtype = np.float64\n\n G_X = np.zeros((n_queries, n_samples_fit), dtype)\n for i in range(n_queries):\n G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0)\n\n G_X **= 2\n G_X *= -0.5\n\n return self.kernel_pca_.transform(G_X)\n", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 283, + "n_words": 108, + "vocab_size": 85, + "complexity": 5, + "nloc": 18, + "token_counts": 170, + "n_ast_nodes": 261, + "n_identifiers": 27, + "d_id": 76893, + "documentation": { + "docstring": "Transform X.\n\n This is implemented by linking the points X into the graph of geodesic\n distances of the training data. 
First the `n_neighbors` nearest\n neighbors of X are found in the training data, and from these the\n shortest geodesic distances from each point in X to each point in\n the training data are computed in order to construct the kernel.\n The embedding of X is the projection of this kernel onto the\n embedding vectors of the training set.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_queries, n_features)\n If neighbors_algorithm='precomputed', X is assumed to be a\n distance matrix or a sparse graph of shape\n (n_queries, n_samples_fit).\n\n Returns\n -------\n X_new : array-like, shape (n_queries, n_components)\n X transformed in the new space.\n ", + "n_words": 120, + "vocab_size": 71, + "n_whitespaces": 262, + "language": "en" + } + }, + { + "id": 114694, + "commit_id": "ef0262e95a1e1a5403896088ca3938adb895a8d6", + "repo": "mindsdb", + "path": "mindsdb/integrations/mysql_handler/mysql_handler.py", + "file_name": "mysql_handler.py", + "fun_name": "check_status", + "commit_message": "Add mysql handler", + "code": "def check_status(self):\n \n status = {\n 'success': False\n }\n try:\n con = self.__connect()\n with closing(con) as con:\n status['success'] = con.is_connected()\n except Exception as e:\n log.error(f'Error connecting to MySQL {self.database}, {e}!')\n status['error'] = e\n return status\n", + "url": "https://github.com/mindsdb/mindsdb.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 146, + "n_words": 34, + "vocab_size": 29, + "complexity": 2, + "nloc": 12, + "token_counts": 60, + "n_ast_nodes": 122, + "n_identifiers": 12, + "d_id": 25250, + "documentation": { + "docstring": "\n Check the connection of the MySQL database\n :return: success status and error message if error occurs\n ", + "n_words": 16, + "vocab_size": 14, + "n_whitespaces": 38, + "language": "en" + } + }, + { + "id": 66747, + "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", + "repo": "erpnext", + "path": "erpnext/patches/v13_0/delete_report_requested_items_to_order.py", + "file_name": "delete_report_requested_items_to_order.py", + "fun_name": "execute", + "commit_message": "style: format code with black", + "code": "def execute():\n\t\n\tauto_email_reports = frappe.db.get_values(\n\t\t\"Auto Email Report\", {\"report\": \"Requested Items to Order\"}, [\"name\"]\n\t)\n\tfor auto_email_report in auto_email_reports:\n\t\tfrappe.delete_doc(\"Auto Email Report\", auto_email_report[0])\n\n\tfrappe.db.sql(\n\t\t\n\t)\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 17, + "n_words": 25, + "vocab_size": 22, + "complexity": 2, + "nloc": 12, + "token_counts": 49, + "n_ast_nodes": 89, + "n_identifiers": 8, + "d_id": 14316, + "documentation": { + "docstring": "Check for one or multiple Auto Email Reports and delete\n\t\tDELETE FROM `tabReport`\n\t\tWHERE name = 'Requested Items to Order'\n\t", + "n_words": 20, + "vocab_size": 20, + "n_whitespaces": 17, + "language": "en" + } + }, + { + "id": 173321, + "commit_id": "fbac3e38ac116855b930ee60fb3c997337ae17b7", + "repo": "calibre-web", + "path": "cps/helper.py", + "file_name": "helper.py", + "fun_name": "check_send_to_ereader", + "commit_message": "Eenabled send epubs to E-Reader devices", + "code": "def check_send_to_ereader(entry):\n \n formats = list()\n book_formats = list()\n if len(entry.data):\n for ele in iter(entry.data):\n if ele.uncompressed_size < config.mail_size:\n 
formats.append(ele.format)\n if 'EPUB' in formats:\n book_formats.append({'format': 'Epub',\n 'convert': 0,\n 'text': _('Send %(format)s to E-Reader', format='Epub')})\n if 'MOBI' in formats:\n book_formats.append({'format': 'Mobi',\n 'convert': 0,\n 'text': _('Send %(format)s to E-Reader', format='Mobi')})\n if 'PDF' in formats:\n book_formats.append({'format': 'Pdf',\n 'convert': 0,\n 'text': _('Send %(format)s to E-Reader', format='Pdf')})\n if 'AZW' in formats:\n book_formats.append({'format': 'Azw',\n 'convert': 0,\n 'text': _('Send %(format)s to E-Reader', format='Azw')})\n if config.config_converterpath:\n book_formats.extend(check_send_to_ereader_with_converter(formats))\n return book_formats\n else:\n log.error(u'Cannot find book entry %d', entry.id)\n return None\n\n\n# Check if a reader is existing for any of the book formats, if not, return empty list, otherwise return\n# list with supported formats", + "url": "https://github.com/janeczku/calibre-web.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 527, + "n_words": 114, + "vocab_size": 65, + "complexity": 9, + "nloc": 29, + "token_counts": 202, + "n_ast_nodes": 370, + "n_identifiers": 21, + "d_id": 40835, + "documentation": { + "docstring": "\n returns all available book formats for sending to E-Reader\n ", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 20, + "language": "en" + } + }, + { + "id": 289390, + "commit_id": "31a787558fd312331b55e5c2c4b33341fc3601fc", + "repo": "core", + "path": "tests/components/history_stats/test_sensor.py", + "file_name": "test_sensor.py", + "fun_name": "test_async_on_entire_period", + "commit_message": "Ensure recorder test fixture is setup before hass fixture (#80528)\n\n* Ensure recorder test fixture is setup before hass fixture\r\n\r\n* Adjust more tests", + "code": "async def test_async_on_entire_period(recorder_mock, hass):\n \n start_time = dt_util.utcnow() - timedelta(minutes=60)\n t0 = start_time + timedelta(minutes=20)\n t1 = t0 + timedelta(minutes=10)\n t2 = t1 + timedelta(minutes=10)\n\n # Start t0 t1 t2 End\n # |--20min--|--20min--|--10min--|--10min--|\n # |---on----|--off----|---on----|--off----|\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 83, + "n_words": 34, + "vocab_size": 20, + "complexity": 2, + "nloc": 62, + "token_counts": 284, + "n_ast_nodes": 90, + "n_identifiers": 11, + "d_id": 88532, + "documentation": { + "docstring": "Test the history statistics sensor measuring as on the entire period.", + "n_words": 11, + "vocab_size": 10, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 224461, + "commit_id": "f79b34d174e41084391868e7b503f5c61b8b1bdf", + "repo": "mkdocs", + "path": "mkdocs/plugins.py", + "file_name": "plugins.py", + "fun_name": "on_post_template", + "commit_message": "Move plugin events docs into source code + refactor\n\n* Create real (no-op) methods for each event in the base class.\n* Refactor event dispatcher to not check for methods' existence, instead just call them.\n* Move documentation from Markdown into docstrings of these methods.\n* Activate the 'mkdocstrings' plugin.\n* Use 'mkdocstrings' to insert documentation from those docstrings into the site.", + "code": "def on_post_template(self, output_content, template_name, config):\n \n return output_content\n\n # Page events\n", + "url": "https://github.com/mkdocs/mkdocs.git", + "language": "Python", + 
"ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 6, + "n_whitespaces": 27, + "n_words": 10, + "vocab_size": 10, + "complexity": 1, + "nloc": 2, + "token_counts": 14, + "n_ast_nodes": 23, + "n_identifiers": 5, + "d_id": 57306, + "documentation": { + "docstring": "\n The `post_template` event is called after the template is rendered, but before\n it is written to disc and can be used to alter the output of the template.\n If an empty string is returned, the template is skipped and nothing is is\n written to disc.\n\n Parameters:\n output_content: output of rendered template as string\n template_name: string filename of template\n config: global configuration object\n\n Returns:\n output of rendered template as string\n ", + "n_words": 69, + "vocab_size": 42, + "n_whitespaces": 163, + "language": "en" + } + }, + { + "id": 101218, + "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", + "repo": "faceswap", + "path": "lib/align/alignments.py", + "file_name": "alignments.py", + "fun_name": "frame_has_faces", + "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", + "code": "def frame_has_faces(self, frame_name):\n \n retval = bool(self._data.get(frame_name, {}).get(\"faces\", []))\n logger.trace(\"'%s': %s\", frame_name, retval)\n return retval\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 42, + "n_words": 14, + "vocab_size": 13, + "complexity": 1, + "nloc": 4, + "token_counts": 44, + "n_ast_nodes": 73, + "n_identifiers": 9, + "d_id": 20638, + "documentation": { + "docstring": " Check whether a given frame_name exists within the alignments :attr:`data` and contains\n at least 1 face.\n\n Parameters\n ----------\n frame_name: str\n The frame name to check. 
This should be the base name of the frame, not the full path\n\n Returns\n -------\n bool\n ``True`` if the given frame_name exists within the alignments :attr:`data` and has at\n least 1 face associated with it, otherwise ``False``\n ", + "n_words": 62, + "vocab_size": 46, + "n_whitespaces": 152, + "language": "en" + } + }, + { + "id": 269108, + "commit_id": "9dc9a78cc6502226775a99725c654fab3298aa5f", + "repo": "keras", + "path": "keras/utils/image_utils.py", + "file_name": "image_utils.py", + "fun_name": "smart_resize", + "commit_message": "Expose all utilities in `keras.utils.__init__.py`, and resolve the hourglass import issues that led to the creation of an extraneous `all_utils.py` file / library.\n\nPiperOrigin-RevId: 435725558", + "code": "def smart_resize(x, size, interpolation='bilinear'):\n \n if len(size) != 2:\n raise ValueError('Expected `size` to be a tuple of 2 integers, '\n f'but got: {size}.')\n img = tf.convert_to_tensor(x)\n if img.shape.rank is not None:\n if img.shape.rank < 3 or img.shape.rank > 4:\n raise ValueError(\n 'Expected an image array with shape `(height, width, channels)`, '\n 'or `(batch_size, height, width, channels)`, but '\n f'got input with incorrect rank, of shape {img.shape}.')\n shape = tf.shape(img)\n height, width = shape[-3], shape[-2]\n target_height, target_width = size\n if img.shape.rank is not None:\n static_num_channels = img.shape[-1]\n else:\n static_num_channels = None\n\n crop_height = tf.cast(\n tf.cast(width * target_height, 'float32') / target_width, 'int32')\n crop_width = tf.cast(\n tf.cast(height * target_width, 'float32') / target_height, 'int32')\n\n # Set back to input height / width if crop_height / crop_width is not smaller.\n crop_height = tf.minimum(height, crop_height)\n crop_width = tf.minimum(width, crop_width)\n\n crop_box_hstart = tf.cast(\n tf.cast(height - crop_height, 'float32') / 2, 'int32')\n crop_box_wstart = tf.cast(tf.cast(width - crop_width, 'float32') / 2, 'int32')\n\n if img.shape.rank == 4:\n crop_box_start = tf.stack([0, crop_box_hstart, crop_box_wstart, 0])\n crop_box_size = tf.stack([-1, crop_height, crop_width, -1])\n else:\n crop_box_start = tf.stack([crop_box_hstart, crop_box_wstart, 0])\n crop_box_size = tf.stack([crop_height, crop_width, -1])\n\n img = tf.slice(img, crop_box_start, crop_box_size)\n img = tf.image.resize(images=img, size=size, method=interpolation)\n # Apparent bug in resize_images_v2 may cause shape to be lost\n if img.shape.rank is not None:\n if img.shape.rank == 4:\n img.set_shape((None, None, None, static_num_channels))\n if img.shape.rank == 3:\n img.set_shape((None, None, static_num_channels))\n if isinstance(x, np.ndarray):\n return img.numpy()\n return img\n\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 360, + "n_words": 226, + "vocab_size": 122, + "complexity": 11, + "nloc": 43, + "token_counts": 404, + "n_ast_nodes": 646, + "n_identifiers": 35, + "d_id": 79905, + "documentation": { + "docstring": "Resize images to a target size without aspect ratio distortion.\n\n Warning: `tf.keras.preprocessing.image.smart_resize` is not recommended for\n new code. Prefer `tf.keras.layers.Resizing`, which provides the same\n functionality as a preprocessing layer and adds `tf.RaggedTensor` support. 
See\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers)\n for an overview of preprocessing layers.\n\n TensorFlow image datasets typically yield images that have each a different\n size. However, these images need to be batched before they can be\n processed by Keras layers. To be batched, images need to share the same height\n and width.\n\n You could simply do:\n\n ```python\n size = (200, 200)\n ds = ds.map(lambda img: tf.image.resize(img, size))\n ```\n\n However, if you do this, you distort the aspect ratio of your images, since\n in general they do not all have the same aspect ratio as `size`. This is\n fine in many cases, but not always (e.g. for GANs this can be a problem).\n\n Note that passing the argument `preserve_aspect_ratio=True` to `resize`\n will preserve the aspect ratio, but at the cost of no longer respecting the\n provided target size. Because `tf.image.resize` doesn't crop images,\n your output images will still have different sizes.\n\n This calls for:\n\n ```python\n size = (200, 200)\n ds = ds.map(lambda img: smart_resize(img, size))\n ```\n\n Your output images will actually be `(200, 200)`, and will not be distorted.\n Instead, the parts of the image that do not fit within the target size\n get cropped out.\n\n The resizing process is:\n\n 1. Take the largest centered crop of the image that has the same aspect ratio\n as the target size. For instance, if `size=(200, 200)` and the input image has\n size `(340, 500)`, we take a crop of `(340, 340)` centered along the width.\n 2. Resize the cropped image to the target size. In the example above,\n we resize the `(340, 340)` crop to `(200, 200)`.\n\n Args:\n x: Input image or batch of images (as a tensor or NumPy array). Must be in\n format `(height, width, channels)` or `(batch_size, height, width,\n channels)`.\n size: Tuple of `(height, width)` integer. Target size.\n interpolation: String, interpolation to use for resizing. Defaults to\n `'bilinear'`. Supports `bilinear`, `nearest`, `bicubic`, `area`,\n `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.\n\n Returns:\n Array with shape `(size[0], size[1], channels)`. If the input image was a\n NumPy array, the output is a NumPy array, and if it was a TF tensor,\n the output is a TF tensor.\n ", + "n_words": 383, + "vocab_size": 215, + "n_whitespaces": 460, + "language": "en" + } + }, + { + "id": 69882, + "commit_id": "be927fda3dc4118b77ad0f88d5e6deb652a5f5b3", + "repo": "glances", + "path": "glances/plugins/glances_ports.py", + "file_name": "glances_ports.py", + "fun_name": "_port_scan_icmp", + "commit_message": "Prepare memory leak test. 
Not active for the moment", + "code": "def _port_scan_icmp(self, port):\n \n ret = None\n\n # Create the ping command\n # Use the system ping command because it already have the sticky bit set\n # Python can not create ICMP packet with non root right\n if WINDOWS:\n timeout_opt = '-w'\n count_opt = '-n'\n elif MACOS or BSD:\n timeout_opt = '-t'\n count_opt = '-c'\n else:\n # Linux and co...\n timeout_opt = '-W'\n count_opt = '-c'\n # Build the command line\n # Note: Only string are allowed\n cmd = [\n 'ping',\n count_opt,\n '1',\n timeout_opt,\n str(self._resolv_name(port['timeout'])),\n self._resolv_name(port['host']),\n ]\n fnull = open(os.devnull, 'w')\n\n try:\n counter = Counter()\n ret = subprocess.check_call(cmd, stdout=fnull, stderr=fnull, close_fds=True)\n if ret == 0:\n port['status'] = counter.get()\n else:\n port['status'] = False\n except subprocess.CalledProcessError:\n # Correct issue #1084: No Offline status for timed-out ports\n port['status'] = False\n except Exception as e:\n logger.debug(\"{}: Error while pinging host {} ({})\".format(self.plugin_name, port['host'], e))\n\n fnull.close()\n\n return ret\n", + "url": "https://github.com/nicolargo/glances.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 518, + "n_words": 142, + "vocab_size": 103, + "complexity": 7, + "nloc": 33, + "token_counts": 177, + "n_ast_nodes": 314, + "n_identifiers": 32, + "d_id": 15135, + "documentation": { + "docstring": "Scan the (ICMP) port structure (dict) and update the status key.", + "n_words": 11, + "vocab_size": 10, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 182936, + "commit_id": "36d7973c7c6792fd1100d5512140a4701b53ba3d", + "repo": "textual", + "path": "src/textual/devtools/service.py", + "file_name": "service.py", + "fun_name": "shutdown", + "commit_message": "Code review actions", + "code": "async def shutdown(self) -> None:\n \n\n # Stop polling/writing Console dimensions to clients\n self.shutdown_event.set()\n await self.size_poll_task\n\n # We're shutting down the server, so inform all connected clients\n for client in self.clients:\n await client.close()\n self.clients.clear()\n\n", + "url": "https://github.com/Textualize/textual.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 93, + "n_words": 33, + "vocab_size": 30, + "complexity": 2, + "nloc": 7, + "token_counts": 39, + "n_ast_nodes": 71, + "n_identifiers": 9, + "d_id": 44013, + "documentation": { + "docstring": "Stop server async tasks and clean up all client handlers", + "n_words": 10, + "vocab_size": 10, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 166669, + "commit_id": "7e23a37e1c5bda81234801a6584563e2880769eb", + "repo": "pandas", + "path": "pandas/core/indexes/interval.py", + "file_name": "interval.py", + "fun_name": "_intersection", + "commit_message": "ENH: consistency of input args for boundaries - Interval (#46522)", + "code": "def _intersection(self, other, sort):\n \n # For IntervalIndex we also know other.inclusive == self.inclusive\n if self.left.is_unique and self.right.is_unique:\n taken = self._intersection_unique(other)\n elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:\n # Swap other/self if other is unique and self does not have\n # multiple NaNs\n taken = other._intersection_unique(self)\n else:\n # duplicates\n taken = self._intersection_non_unique(other)\n\n if sort is None:\n taken = taken.sort_values()\n\n return 
taken\n", + "url": "https://github.com/pandas-dev/pandas.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 187, + "n_words": 61, + "vocab_size": 45, + "complexity": 7, + "nloc": 10, + "token_counts": 88, + "n_ast_nodes": 147, + "n_identifiers": 13, + "d_id": 39853, + "documentation": { + "docstring": "\n intersection specialized to the case with matching dtypes.\n ", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 23, + "language": "en" + } + }, + { + "id": 276218, + "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", + "repo": "keras", + "path": "keras/saving/saved_model_experimental.py", + "file_name": "saved_model_experimental.py", + "fun_name": "load_from_saved_model", + "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", + "code": "def load_from_saved_model(saved_model_path, custom_objects=None):\n \n warnings.warn(\n \"`tf.keras.experimental.load_from_saved_model` is deprecated\"\n \"and will be removed in a future version. \"\n \"Please switch to `tf.keras.models.load_model`.\",\n stacklevel=2,\n )\n # restore model topology from json string\n model_json_filepath = tf.io.gfile.join(\n tf.compat.as_bytes(saved_model_path),\n tf.compat.as_bytes(tf.saved_model.ASSETS_DIRECTORY),\n tf.compat.as_bytes(SAVED_MODEL_FILENAME_JSON),\n )\n with tf.io.gfile.GFile(model_json_filepath, \"r\") as f:\n model_json = f.read()\n model = model_config.model_from_json(\n model_json, custom_objects=custom_objects\n )\n\n # restore model weights\n checkpoint_prefix = tf.io.gfile.join(\n tf.compat.as_text(saved_model_path),\n tf.compat.as_text(tf.saved_model.VARIABLES_DIRECTORY),\n tf.compat.as_text(tf.saved_model.VARIABLES_FILENAME),\n )\n model.load_weights(checkpoint_prefix)\n return model\n\n\n#### Directory / path helpers\n\n", + "url": "https://github.com/keras-team/keras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 194, + "n_words": 69, + "vocab_size": 57, + "complexity": 1, + "nloc": 24, + "token_counts": 154, + "n_ast_nodes": 250, + "n_identifiers": 28, + "d_id": 81599, + "documentation": { + "docstring": "Loads a keras Model from a SavedModel created by `export_saved_model()`.\n\n This function reinstantiates model state by:\n 1) loading model topology from json (this will eventually come\n from metagraph).\n 2) loading model weights from checkpoint.\n\n Example:\n\n ```python\n import tensorflow as tf\n\n # Create a tf.keras model.\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(1, input_shape=[10]))\n model.summary()\n\n # Save the tf.keras model in the SavedModel format.\n path = '/tmp/simple_keras_model'\n tf.keras.experimental.export_saved_model(model, path)\n\n # Load the saved keras model back.\n new_model = tf.keras.experimental.load_from_saved_model(path)\n new_model.summary()\n ```\n\n Args:\n saved_model_path: a string specifying the path to an existing SavedModel.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n\n Returns:\n a keras.Model instance.\n ", + "n_words": 108, + "vocab_size": 82, + "n_whitespaces": 207, + "language": "en" + } + }, + { + "id": 109556, + "commit_id": "e87416b33b01f6fc4e3b2290d6e8a60e6ddb6e55", + "repo": "matplotlib", + "path": "doc/conf.py", + "file_name": "conf.py", + "fun_name": "add_html_cache_busting", + "commit_message": "DOC: Add cache busting to all static assets\n\nWe have 
seen both in `/stable` and `/3.6.0`, some styling is broken\nbecause old CSS is cached. CSS might change from updating\nsphinx-gallery, mpl-sphinx-theme, pydata-sphinx-theme, etc. Adding a\nversioned query breaks the cache. It's a bit over-eager to base it on\nMatplotlib version and not the file contents (since those dependencies\nmay not have updated), but this should work well enough.", + "code": "def add_html_cache_busting(app, pagename, templatename, context, doctree):\n \n from sphinx.builders.html import Stylesheet, JavaScript\n\n css_tag = context['css_tag']\n js_tag = context['js_tag']\n", + "url": "https://github.com/matplotlib/matplotlib.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 29, + "n_words": 17, + "vocab_size": 16, + "complexity": 1, + "nloc": 8, + "token_counts": 52, + "n_ast_nodes": 57, + "n_identifiers": 13, + "d_id": 23644, + "documentation": { + "docstring": "\n Add cache busting query on CSS and JavaScript assets.\n\n This adds the Matplotlib version as a query to the link reference in the\n HTML, if the path is not absolute (i.e., it comes from the `_static`\n directory) and doesn't already have a query.\n ", + "n_words": 43, + "vocab_size": 36, + "n_whitespaces": 59, + "language": "en" + } + }, + { + "id": 298684, + "commit_id": "04b9c9300645fb30541e2bf0881d35cc698a47c5", + "repo": "core", + "path": "homeassistant/components/econet/climate.py", + "file_name": "climate.py", + "fun_name": "target_temperature_high", + "commit_message": "Use climate enums in econet (#70633)", + "code": "def target_temperature_high(self):\n \n if self.hvac_mode == HVACMode.HEAT_COOL:\n return self._econet.cool_set_point\n return None\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 42, + "n_words": 10, + "vocab_size": 9, + "complexity": 2, + "nloc": 4, + "token_counts": 23, + "n_ast_nodes": 39, + "n_identifiers": 7, + "d_id": 97626, + "documentation": { + "docstring": "Return the higher bound temperature we try to reach.", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 119183, + "commit_id": "d9dcd1394aedf760272f14c3560cd5415495c28a", + "repo": "jax", + "path": "jax/_src/numpy/lax_numpy.py", + "file_name": "lax_numpy.py", + "fun_name": "_promote_shapes", + "commit_message": "djax: let make_jaxpr build dyn shape jaxprs", + "code": "def _promote_shapes(fun_name, *args):\n \n if len(args) < 2:\n return args\n else:\n shapes = [shape(arg) for arg in args]\n if _all(len(shapes[0]) == len(s) for s in shapes[1:]):\n return args # no need for rank promotion, so rely on lax promotion\n nonscalar_ranks = {len(shp) for shp in shapes if shp}\n if len(nonscalar_ranks) < 2:\n return args\n else:\n if config.jax_numpy_rank_promotion != \"allow\":\n _rank_promotion_warning_or_error(fun_name, shapes)\n result_shape = lax.broadcast_shapes(*shapes)\n return [broadcast_to(arg, result_shape) for arg, shp in zip(args, shapes)]\n", + "url": "https://github.com/google/jax.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 126, + "n_words": 72, + "vocab_size": 49, + "complexity": 10, + "nloc": 15, + "token_counts": 128, + "n_ast_nodes": 203, + "n_identifiers": 19, + "d_id": 26552, + "documentation": { + "docstring": "Apply NumPy-style broadcasting, making args shape-compatible for lax.py.", + "n_words": 8, + "vocab_size": 8, + 
"n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 176008, + "commit_id": "b97d27d2e916025f65fed751d54c089d4d4bd022", + "repo": "autokeras", + "path": "autokeras/keras_layers.py", + "file_name": "keras_layers.py", + "fun_name": "_build_attention", + "commit_message": "clean up imports", + "code": "def _build_attention(self, qkv_rank):\n \n if self._attention_axes is None:\n self._attention_axes = tuple(range(1, qkv_rank - 2))\n else:\n self._attention_axes = tuple(self._attention_axes) # pragma: no cover\n (\n self._dot_product_equation,\n self._combine_equation,\n attn_scores_rank,\n ) = _build_attention_equation(qkv_rank, attn_axes=self._attention_axes)\n norm_axes = tuple(\n range(attn_scores_rank - len(self._attention_axes), attn_scores_rank)\n )\n self._masked_softmax = MaskedSoftmax(\n mask_expansion_axes=[1], normalization_axes=norm_axes\n )\n self._dropout_layer = layers.Dropout(rate=self._dropout)\n", + "url": "https://github.com/keras-team/autokeras.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 194, + "n_words": 46, + "vocab_size": 36, + "complexity": 2, + "nloc": 17, + "token_counts": 113, + "n_ast_nodes": 176, + "n_identifiers": 22, + "d_id": 41666, + "documentation": { + "docstring": "Builds multi-head dot-product attention computations.\n\n This function builds attributes necessary for `_compute_attention` to\n costomize attention computation to replace the default dot-product\n attention.\n\n Args:\n qkv_rank: the rank of query, key, value tensors.\n ", + "n_words": 31, + "vocab_size": 27, + "n_whitespaces": 75, + "language": "en" + } + }, + { + "id": 248358, + "commit_id": "4cc4229cd7a55d2556c798fecbb1c9660dc821c8", + "repo": "synapse", + "path": "tests/rest/client/test_retention.py", + "file_name": "test_retention.py", + "fun_name": "test_visibility_when_disabled", + "commit_message": "Prevent expired events from being filtered out when retention is disabled (#12611)\n\nCo-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>\r\nCo-authored-by: Patrick Cloke ", + "code": "def test_visibility_when_disabled(self) -> None:\n \n room_id = self.helper.create_room_as(self.user_id, tok=self.token)\n\n self.helper.send_state(\n room_id=room_id,\n event_type=EventTypes.Retention,\n body={\"max_lifetime\": one_day_ms},\n tok=self.token,\n )\n\n resp = self.helper.send(room_id=room_id, body=\"test\", tok=self.token)\n\n self.reactor.advance(one_day_ms * 2 / 1000)\n\n self.get_event(room_id, resp[\"event_id\"])\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 120, + "n_words": 27, + "vocab_size": 25, + "complexity": 1, + "nloc": 12, + "token_counts": 102, + "n_ast_nodes": 160, + "n_identifiers": 19, + "d_id": 72235, + "documentation": { + "docstring": "Retention policies should be ignored when the retention feature is disabled.", + "n_words": 11, + "vocab_size": 11, + "n_whitespaces": 10, + "language": "en" + } + }, + { + "id": 65709, + "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", + "repo": "erpnext", + "path": "erpnext/crm/doctype/contract/contract.py", + "file_name": "contract.py", + "fun_name": "update_status_for_contracts", + "commit_message": "style: format code with black", + "code": "def update_status_for_contracts():\n\t\n\n\tcontracts = frappe.get_all(\n\t\t\"Contract\",\n\t\tfilters={\"is_signed\": True, \"docstatus\": 1},\n\t\tfields=[\"name\", 
\"start_date\", \"end_date\"],\n\t)\n\n\tfor contract in contracts:\n\t\tstatus = get_status(contract.get(\"start_date\"), contract.get(\"end_date\"))\n\n\t\tfrappe.db.set_value(\"Contract\", contract.get(\"name\"), \"status\", status)\n", + "url": "https://github.com/frappe/erpnext.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 17, + "n_words": 26, + "vocab_size": 25, + "complexity": 2, + "nloc": 9, + "token_counts": 78, + "n_ast_nodes": 138, + "n_identifiers": 12, + "d_id": 13992, + "documentation": { + "docstring": "\n\tRun the daily hook to update the statuses for all signed\n\tand submitted Contracts\n\t", + "n_words": 14, + "vocab_size": 13, + "n_whitespaces": 12, + "language": "en" + } + }, + { + "id": 308463, + "commit_id": "d26275011ae4e8ba0a8dcdc2a7ef81b5911d3900", + "repo": "core", + "path": "tests/components/command_line/test_cover.py", + "file_name": "test_cover.py", + "fun_name": "test_unique_id", + "commit_message": "Add unique_id configuration variable to command_line integration (#58596)", + "code": "async def test_unique_id(hass):\n \n await setup_test_entity(\n hass,\n {\n \"unique\": {\n \"command_open\": \"echo open\",\n \"command_close\": \"echo close\",\n \"command_stop\": \"echo stop\",\n \"unique_id\": \"unique\",\n },\n \"not_unique_1\": {\n \"command_open\": \"echo open\",\n \"command_close\": \"echo close\",\n \"command_stop\": \"echo stop\",\n \"unique_id\": \"not-so-unique-anymore\",\n },\n \"not_unique_2\": {\n \"command_open\": \"echo open\",\n \"command_close\": \"echo close\",\n \"command_stop\": \"echo stop\",\n \"unique_id\": \"not-so-unique-anymore\",\n },\n },\n )\n\n assert len(hass.states.async_all()) == 2\n\n ent_reg = entity_registry.async_get(hass)\n\n assert len(ent_reg.entities) == 2\n assert ent_reg.async_get_entity_id(\"cover\", \"command_line\", \"unique\") is not None\n assert (\n ent_reg.async_get_entity_id(\"cover\", \"command_line\", \"not-so-unique-anymore\")\n is not None\n )\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 386, + "n_words": 78, + "vocab_size": 38, + "complexity": 1, + "nloc": 32, + "token_counts": 138, + "n_ast_nodes": 264, + "n_identifiers": 11, + "d_id": 107219, + "documentation": { + "docstring": "Test unique_id option and if it only creates one cover per id.", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 11, + "language": "en" + } + }, + { + "id": 197534, + "commit_id": "7fe8e027ae1d7f683243c0229b961671a6cbb4c5", + "repo": "sympy", + "path": "sympy/stats/joint_rv_types.py", + "file_name": "joint_rv_types.py", + "fun_name": "MultivariateT", + "commit_message": "Improved some documentation in the stats module", + "code": "def MultivariateT(syms, mu, sigma, v):\n \n return multivariate_rv(MultivariateTDistribution, syms, mu, sigma, v)\n\n\n#-------------------------------------------------------------------------------\n# Multivariate Normal Gamma distribution ---------------------------------------\n", + "url": "https://github.com/sympy/sympy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 7, + "n_whitespaces": 22, + "n_words": 18, + "vocab_size": 16, + "complexity": 1, + "nloc": 2, + "token_counts": 25, + "n_ast_nodes": 37, + "n_identifiers": 7, + "d_id": 48612, + "documentation": { + "docstring": "\n Creates a joint random variable with multivariate T-distribution.\n\n Parameters\n 
==========\n\n syms : A symbol/str\n For identifying the random variable.\n mu : A list/matrix\n Representing the location vector\n sigma : The shape matrix for the distribution\n\n Examples\n ========\n\n >>> from sympy.stats import density, MultivariateT\n >>> from sympy import Symbol\n\n >>> x = Symbol(\"x\")\n >>> X = MultivariateT(\"x\", [1, 1], [[1, 0], [0, 1]], 2)\n\n >>> density(X)(1, 2)\n 2/(9*pi)\n\n Returns\n =======\n\n RandomSymbol\n\n ", + "n_words": 70, + "vocab_size": 56, + "n_whitespaces": 139, + "language": "en" + } + }, + { + "id": 128291, + "commit_id": "db2f84bdfa49d218f97bf7f10678232bff8c48d5", + "repo": "ray", + "path": "dashboard/modules/job/job_head.py", + "file_name": "job_head.py", + "fun_name": "choose_agent", + "commit_message": "[Job Submission][refactor 5/N] Remove the head node dependency on the `Raylet` process (#28599)\n\n* introduce stop_job\r\n\r\nSigned-off-by: Catch-Bull \r\n\r\n* save\r\n\r\nSigned-off-by: Catch-Bull \r\n\r\n* save\r\n\r\nSigned-off-by: Catch-Bull \r\n\r\n* save\r\n\r\nSigned-off-by: Catch-Bull \r\n\r\n* head rayletless\r\n\r\nSigned-off-by: Catch-Bull \r\n\r\n* fix UT\r\n\r\nSigned-off-by: Catch-Bull \r\n\r\n* fix UT\r\n\r\nSigned-off-by: Catch-Bull \r\n\r\n* save\r\n\r\nSigned-off-by: Catch-Bull \r\n\r\n* refactor choose_agent\r\n\r\nSigned-off-by: Catch-Bull \r\n\r\n* fix\r\n\r\nSigned-off-by: Catch-Bull \r\n\r\n* save\r\n\r\nSigned-off-by: Catch-Bull \r\n\r\n* save\r\n\r\nSigned-off-by: Catch-Bull \r\n\r\n* fix UT\r\n\r\n* delete mock\r\n\r\nSigned-off-by: Catch-Bull \r\n\r\n* Use \"auto\" for entrypoint script\r\n\r\nSigned-off-by: Archit Kulkarni \r\n\r\nSigned-off-by: Catch-Bull \r\nSigned-off-by: Archit Kulkarni \r\nCo-authored-by: Archit Kulkarni ", + "code": "async def choose_agent(self) -> Optional[JobAgentSubmissionClient]:\n \n # the number of agents which has an available HTTP port.\n while True:\n raw_agent_infos = await DataOrganizer.get_all_agent_infos()\n agent_infos = {\n key: value\n for key, value in raw_agent_infos.items()\n if value.get(\"httpPort\", -1) > 0\n }\n if len(agent_infos) > 0:\n break\n await asyncio.sleep(dashboard_consts.TRY_TO_GET_AGENT_INFO_INTERVAL_SECONDS)\n # delete dead agents.\n for dead_node in set(self._agents) - set(agent_infos):\n client = self._agents.pop(dead_node)\n await client.close()\n\n if len(self._agents) >= dashboard_consts.CANDIDATE_AGENT_NUMBER:\n node_id = sample(set(self._agents), 1)[0]\n return self._agents[node_id]\n else:\n # Randomly select one from among all agents, it is possible that\n # the selected one already exists in `self._agents`\n node_id = sample(set(agent_infos), 1)[0]\n agent_info = agent_infos[node_id]\n\n if node_id not in self._agents:\n node_ip = agent_info[\"ipAddress\"]\n http_port = agent_info[\"httpPort\"]\n agent_http_address = f\"http://{node_ip}:{http_port}\"\n self._agents[node_id] = JobAgentSubmissionClient(agent_http_address)\n\n return self._agents[node_id]\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 451, + "n_words": 117, + "vocab_size": 86, + "complexity": 8, + "nloc": 40, + "token_counts": 199, + "n_ast_nodes": 333, + "n_identifiers": 30, + "d_id": 28655, + "documentation": { + "docstring": "\n Try to disperse as much as possible to select one of\n the `CANDIDATE_AGENT_NUMBER` agents to solve requests.\n the agents will not pop from `self._agents` unless\n it's dead. 
Saved in `self._agents` is the agent that was\n used before.\n Strategy:\n 1. if the number of `self._agents` has reached\n `CANDIDATE_AGENT_NUMBER`, randomly select one agent from\n `self._agents`.\n 2. if not, randomly select one agent from all available agents,\n it is possible that the selected one already exists in\n `self._agents`.\n ", + "n_words": 75, + "vocab_size": 48, + "n_whitespaces": 203, + "language": "en" + } + }, + { + "id": 311447, + "commit_id": "58b8c30221a6f6e5acbbe98b7e3298b03fb741f5", + "repo": "core", + "path": "tests/components/homekit_controller/test_alarm_control_panel.py", + "file_name": "test_alarm_control_panel.py", + "fun_name": "test_switch_read_alarm_state", + "commit_message": "Improve homekit_controller tests (#65266)", + "code": "async def test_switch_read_alarm_state(hass, utcnow):\n \n helper = await setup_test_component(hass, create_security_system_service)\n\n await helper.async_update(\n ServicesTypes.SECURITY_SYSTEM,\n {CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 0},\n )\n state = await helper.poll_and_get_state()\n assert state.state == \"armed_home\"\n assert state.attributes[\"battery_level\"] == 50\n\n await helper.async_update(\n ServicesTypes.SECURITY_SYSTEM,\n {CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 1},\n )\n state = await helper.poll_and_get_state()\n assert state.state == \"armed_away\"\n\n await helper.async_update(\n ServicesTypes.SECURITY_SYSTEM,\n {CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 2},\n )\n state = await helper.poll_and_get_state()\n assert state.state == \"armed_night\"\n\n await helper.async_update(\n ServicesTypes.SECURITY_SYSTEM,\n {CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 3},\n )\n state = await helper.poll_and_get_state()\n assert state.state == \"disarmed\"\n\n await helper.async_update(\n ServicesTypes.SECURITY_SYSTEM,\n {CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 4},\n )\n state = await helper.poll_and_get_state()\n assert state.state == \"triggered\"\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 222, + "n_words": 83, + "vocab_size": 30, + "complexity": 1, + "nloc": 33, + "token_counts": 186, + "n_ast_nodes": 305, + "n_identifiers": 14, + "d_id": 110112, + "documentation": { + "docstring": "Test that we can read the state of a HomeKit alarm accessory.", + "n_words": 12, + "vocab_size": 12, + "n_whitespaces": 11, + "language": "en" + } + }, + { + "id": 320067, + "commit_id": "5b66ef0a748fd5570361a2a1ed6147e0462568d2", + "repo": "paperless-ngx", + "path": "src/documents/tests/test_api.py", + "file_name": "test_api.py", + "fun_name": "test_task_name_consume_folder", + "commit_message": "Updates how task_args and task_kwargs are parsed, adds testing to cover everything I can think of", + "code": "def test_task_name_consume_folder(self):\n \n result1 = TaskResult.objects.create(\n task_id=str(uuid.uuid4()),\n task_name=\"documents.tasks.some_task\",\n status=celery.states.SUCCESS,\n task_args=\"\\\"('/consume/anothertest.pdf',)\\\"\",\n task_kwargs=\"\\\"{'override_tag_ids': None}\\\"\",\n )\n _ = PaperlessTask.objects.create(attempted_task=result1)\n\n response = self.client.get(self.ENDPOINT)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.data), 1)\n\n returned_data = response.data[0]\n\n self.assertEqual(returned_data[\"name\"], \"anothertest.pdf\")\n", + "url": "https://github.com/paperless-ngx/paperless-ngx.git", + 
"language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 145, + "n_words": 27, + "vocab_size": 24, + "complexity": 1, + "nloc": 14, + "token_counts": 112, + "n_ast_nodes": 186, + "n_identifiers": 29, + "d_id": 117052, + "documentation": { + "docstring": "\n GIVEN:\n - Attempted celery task\n - Task was created through the consume folder\n WHEN:\n - API call is made to get tasks\n THEN:\n - Returned data include the filename\n ", + "n_words": 29, + "vocab_size": 25, + "n_whitespaces": 102, + "language": "en" + } + }, + { + "id": 114339, + "commit_id": "30877cf7ead465750763822b3c88f970c870d9dd", + "repo": "mindsdb", + "path": "mindsdb/integrations/mlflow_handler/mlflow/mlflow_integration.py", + "file_name": "mlflow_integration.py", + "fun_name": "_prepare_registry", + "commit_message": "feat: add mlflow, mysql, base handlers", + "code": "def _prepare_registry(self):\n # noqa\n cur = self.internal_registry.cursor()\n if ('models',) not in list(cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")):\n cur.execute() # TODO: dtype_dict?\n self.internal_registry.commit()\n", + "url": "https://github.com/mindsdb/mindsdb.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 67, + "n_words": 22, + "vocab_size": 21, + "complexity": 2, + "nloc": 5, + "token_counts": 45, + "n_ast_nodes": 84, + "n_identifiers": 8, + "d_id": 25156, + "documentation": { + "docstring": " Checks that sqlite records of registered models exists, otherwise creates it. create table models (model_name text, format text, target text, url text)", + "n_words": 22, + "vocab_size": 19, + "n_whitespaces": 22, + "language": "en" + } + }, + { + "id": 101382, + "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", + "repo": "faceswap", + "path": "scripts/convert.py", + "file_name": "convert.py", + "fun_name": "_get_threads", + "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", + "code": "def _get_threads(self) -> MultiThread:\n \n # TODO Check if multiple threads actually speeds anything up\n save_queue = queue_manager.get_queue(\"convert_out\")\n patch_queue = queue_manager.get_queue(\"patch\")\n return MultiThread(self._converter.process, patch_queue, save_queue,\n thread_count=self._pool_processes, name=\"patch\")\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 87, + "n_words": 26, + "vocab_size": 25, + "complexity": 1, + "nloc": 11, + "token_counts": 47, + "n_ast_nodes": 80, + "n_identifiers": 12, + "d_id": 20797, + "documentation": { + "docstring": " Get the threads for patching the converted faces onto the frames.\n\n Returns\n :class:`lib.multithreading.MultiThread`\n The threads that perform the patching of swapped faces onto the output frames\n ", + "n_words": 26, + "vocab_size": 18, + "n_whitespaces": 59, + "language": "en" + } + }, + { + "id": 221346, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/cmd.py", + "file_name": "cmd.py", + "fun_name": "parseline", + "commit_message": "add python 3.10.4 for windows", + "code": "def parseline(self, line):\n \n line = line.strip()\n if not line:\n return None, None, line\n elif line[0] == '?':\n line = 'help ' + line[1:]\n elif line[0] 
== '!':\n if hasattr(self, 'do_shell'):\n line = 'shell ' + line[1:]\n else:\n return None, None, line\n i, n = 0, len(line)\n while i < n and line[i] in self.identchars: i = i+1\n cmd, arg = line[:i], line[i:].strip()\n return cmd, arg, line\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 203, + "n_words": 66, + "vocab_size": 41, + "complexity": 7, + "nloc": 15, + "token_counts": 129, + "n_ast_nodes": 211, + "n_identifiers": 11, + "d_id": 56361, + "documentation": { + "docstring": "Parse the line into a command name and a string containing\n the arguments. Returns a tuple containing (command, args, line).\n 'command' and 'args' may be None if the line couldn't be parsed.\n ", + "n_words": 32, + "vocab_size": 24, + "n_whitespaces": 54, + "language": "en" + } + }, + { + "id": 107070, + "commit_id": "334cc617b8ed3b6b4ec6cb64ff16a040ef454149", + "repo": "matplotlib", + "path": "lib/matplotlib/widgets.py", + "file_name": "widgets.py", + "fun_name": "_get_animated_artists", + "commit_message": "Fix z_order", + "code": "def _get_animated_artists(self):\n \n return tuple([a for ax_ in self.ax.get_figure().get_axes()\n for a in ax_.get_children()\n if a.get_animated() and a not in self.artists])\n", + "url": "https://github.com/matplotlib/matplotlib.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 75, + "n_words": 19, + "vocab_size": 15, + "complexity": 5, + "nloc": 4, + "token_counts": 48, + "n_ast_nodes": 78, + "n_identifiers": 11, + "d_id": 22584, + "documentation": { + "docstring": "\n Convenience method to get all animated artists of a figure, except\n those already present in self.artists. 
'z_order' is ignored.\n ", + "n_words": 19, + "vocab_size": 19, + "n_whitespaces": 41, + "language": "en" + } + }, + { + "id": 247069, + "commit_id": "1901cb1d4a8b7d9af64493fbd336e9aa2561c20c", + "repo": "synapse", + "path": "tests/rest/client/test_shadow_banned.py", + "file_name": "test_shadow_banned.py", + "fun_name": "test_message", + "commit_message": "Add type hints to `tests/rest/client` (#12084)", + "code": "def test_message(self) -> None:\n \n\n room_id = self.helper.create_room_as(\n self.other_user_id, tok=self.other_access_token\n )\n\n # The user should be in the room.\n self.helper.join(room_id, self.banned_user_id, tok=self.banned_access_token)\n\n # Sending a message should complete successfully.\n result = self.helper.send_event(\n room_id=room_id,\n type=EventTypes.Message,\n content={\"msgtype\": \"m.text\", \"body\": \"with right label\"},\n tok=self.banned_access_token,\n )\n self.assertIn(\"event_id\", result)\n event_id = result[\"event_id\"]\n\n latest_events = self.get_success(\n self.store.get_latest_event_ids_in_room(room_id)\n )\n self.assertNotIn(event_id, latest_events)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 12, + "n_whitespaces": 210, + "n_words": 53, + "vocab_size": 46, + "complexity": 1, + "nloc": 18, + "token_counts": 118, + "n_ast_nodes": 192, + "n_identifiers": 24, + "d_id": 71479, + "documentation": { + "docstring": "Messages from shadow-banned users don't actually get sent.", + "n_words": 8, + "vocab_size": 8, + "n_whitespaces": 7, + "language": "en" + } + }, + { + "id": 187530, + "commit_id": "08209aecf41706dc1eb0171466d88d7fb02aefca", + "repo": "streamlink", + "path": "src/streamlink/plugins/mediaklikk.py", + "file_name": "mediaklikk.py", + "fun_name": "_get_streams", + "commit_message": "plugins: refactor validation schemas\n\nRefactor validation schemas of plugins where schemas are not defined as\nclass attributes or where no major changes are needed:\n\n- Use `validate.none_or_all(...)`\n- Replace `validate.transform(pattern.search)` with `pattern` and fix\n schemas using `validate.regex(pattern)` where a regex has to match\n - Move pattern definitions from class attributes to schema definitions\n - Fix some patterns in regards to quotation matching\n- Remove unneeded type validations, like `str` for example\n- Remove unneeded `TypeError` exceptions from try-except code blocks\n- Fix minor style issues where it makes sense\n - use double quotes\n - add trailing commas\n - fix indentation", + "code": "def _get_streams(self):\n params = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(\n r,\n re.VERBOSE | re.DOTALL,\n ),\n validate.none_or_all(\n validate.get(\"json\"),\n validate.parse_json(),\n {\n \"contentId\": validate.any(str, int),\n validate.optional(\"streamId\"): str,\n validate.optional(\"idec\"): str,\n validate.optional(\"token\"): str,\n },\n ),\n ))\n if not params:\n log.error(\"Could not find player manager data\")\n return\n\n params.update({\n \"video\": (unquote(params.pop(\"token\"))\n if params.get(\"token\") is not None else\n params.pop(\"streamId\")),\n \"noflash\": \"yes\",\n \"embedded\": \"0\",\n })\n\n url_parsed = urlparse(self.url)\n skip_vods = url_parsed.netloc.endswith(\"m4sport.hu\") and url_parsed.path.startswith(\"/elo\")\n\n self.session.http.headers.update({\"Referer\": self.url})\n playlists = self.session.http.get(self.PLAYER_URL, params=params, schema=validate.Schema(\n 
re.compile(r\"pl\\.setup\\s*\\(\\s*(?P<json>{.*?})\\s*\\)\\s*;\", re.DOTALL),\n validate.none_or_all(\n validate.get(\"json\"),\n validate.parse_json(),\n {\"playlist\": [{\n \"file\": validate.url(),\n \"type\": str,\n }]},\n validate.get(\"playlist\"),\n validate.filter(lambda p: p[\"type\"] == \"hls\"),\n validate.filter(lambda p: not skip_vods or \"vod\" not in p[\"file\"]),\n validate.map(lambda p: update_scheme(\"https://\", p[\"file\"])),\n ),\n ))\n\n for url in playlists or []:\n yield from HLSStream.parse_variant_playlist(self.session, url).items()\n\n\n__plugin__ = Mediaklikk\n", + "url": "https://github.com/streamlink/streamlink.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 20, + "n_whitespaces": 698, + "n_words": 114, + "vocab_size": 88, + "complexity": 7, + "nloc": 52, + "token_counts": 367, + "n_ast_nodes": 609, + "n_identifiers": 44, + "d_id": 45798, + "documentation": { + "docstring": "\n mtva_player_manager\\.player\\s*\\(\\s*\n document\\.getElementById\\(\\s*\"\\w+\"\\s*\\)\\s*,\\s*\n (?P<json>{.*?})\\s*\n \\)\\s*;\n ", + "n_words": 4, + "vocab_size": 4, + "n_whitespaces": 104, + "language": "en" + } + }, + { + "id": 84169, + "commit_id": "a142fbff85302c5e3acb2e204eca2e9c75dbc74b", + "repo": "zulip", + "path": "zerver/tests/test_upload.py", + "file_name": "test_upload.py", + "fun_name": "test_upload_file_with_supplied_mimetype", + "commit_message": "tests: Refactor away result.json() calls with helpers.\n\nSigned-off-by: Zixuan James Li ", + "code": "def test_upload_file_with_supplied_mimetype(self) -> None:\n \n fp = StringIO(\"zulip!\")\n fp.name = \"pasted_file\"\n result = self.api_post(\n self.example_user(\"hamlet\"), \"/api/v1/user_uploads?mimetype=image/png\", {\"file\": fp}\n )\n uri = self.assert_json_success(result)[\"uri\"]\n self.assertTrue(uri.endswith(\"pasted_file.png\"))\n", + "url": "https://github.com/zulip/zulip.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 82, + "n_words": 22, + "vocab_size": 19, + "complexity": 1, + "nloc": 13, + "token_counts": 62, + "n_ast_nodes": 114, + "n_identifiers": 12, + "d_id": 17798, + "documentation": { + "docstring": "\n When files are copied into the system clipboard and pasted for upload\n the filename may not be supplied so the extension is determined from a\n query string parameter.\n ", + "n_words": 28, + "vocab_size": 26, + "n_whitespaces": 57, + "language": "en" + } + }, + { + "id": 177121, + "commit_id": "9b63ca1a0d46a1f50bcc59eda52be02721a134db", + "repo": "networkx", + "path": "networkx/readwrite/graph6.py", + "file_name": "graph6.py", + "fun_name": "write_graph6", + "commit_message": "Remove old Appveyor cruft (#5924)\n\n* Remove old Appveyor cruft\r\n\r\n* Fix Windows issue", + "code": "def write_graph6(G, path, nodes=None, header=True):\n \n return write_graph6_file(G, path, nodes=nodes, header=header)\n\n\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")", + "url": "https://github.com/networkx/networkx.git", + "language": "Python", + "ast_errors": "@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")", + "n_ast_errors": 1, + "ast_levels": 8, + "n_whitespaces": 16, + "n_words": 12, + "vocab_size": 11, + "complexity": 1, + "nloc": 2, + "token_counts": 31, + "n_ast_nodes": 67, + "n_identifiers": 7, + "d_id": 42276, + "documentation": { + "docstring": "Write a simple undirected graph to a path in graph6 format.\n\n Parameters\n ----------\n G : Graph (undirected)\n\n path : 
str\n The path naming the file to which to write the graph.\n\n nodes: list or iterable\n Nodes are labeled 0...n-1 in the order provided. If None the ordering\n given by ``G.nodes()`` is used.\n\n header: bool\n If True add '>>graph6<<' string to head of data\n\n Raises\n ------\n NetworkXNotImplemented\n If the graph is directed or is a multigraph.\n\n ValueError\n If the graph has at least ``2 ** 36`` nodes; the graph6 format\n is only defined for graphs of order less than ``2 ** 36``.\n\n Examples\n --------\n You can write a graph6 file by giving the path to a file::\n\n >>> import tempfile\n >>> with tempfile.NamedTemporaryFile(delete=False) as f:\n ... nx.write_graph6(nx.path_graph(2), f.name)\n ... _ = f.seek(0)\n ... print(f.read())\n b'>>graph6<\n\n ", + "n_words": 184, + "vocab_size": 127, + "n_whitespaces": 373, + "language": "en" + } + }, + { + "id": 250105, + "commit_id": "3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b", + "repo": "synapse", + "path": "tests/storage/test_cleanup_extrems.py", + "file_name": "test_cleanup_extrems.py", + "fun_name": "test_forked_graph_cleanup", + "commit_message": "Require types in tests.storage. (#14646)\n\nAdds missing type hints to `tests.storage` package\r\nand does not allow untyped definitions.", + "code": "def test_forked_graph_cleanup(self) -> None:\n r\n\n # Create the room graph\n event_id_a = self.create_and_send_event(self.room_id, self.user)\n event_id_b = self.create_and_send_event(self.room_id, self.user)\n event_id_sf1 = self.create_and_send_event(\n self.room_id, self.user, True, [event_id_a]\n )\n event_id_sf2 = self.create_and_send_event(\n self.room_id, self.user, True, [event_id_a, event_id_b]\n )\n event_id_sf3 = self.create_and_send_event(\n self.room_id, self.user, True, [event_id_sf1]\n )\n self.create_and_send_event(\n self.room_id, self.user, True, [event_id_sf2, event_id_sf3]\n ) # SF4\n event_id_c = self.create_and_send_event(\n self.room_id, self.user, False, [event_id_sf3]\n )\n\n # Add the new extremity and check the latest events are as expected\n self.add_extremity(self.room_id, event_id_a)\n\n latest_event_ids = self.get_success(\n self.store.get_latest_event_ids_in_room(self.room_id)\n )\n self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b, event_id_c})\n\n # Run the background update and check it did the right thing\n self.run_background_update()\n\n latest_event_ids = self.get_success(\n self.store.get_latest_event_ids_in_room(self.room_id)\n )\n self.assertEqual(set(latest_event_ids), {event_id_b, event_id_c})\n\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 350, + "n_words": 105, + "vocab_size": 61, + "complexity": 1, + "nloc": 46, + "token_counts": 220, + "n_ast_nodes": 327, + "n_identifiers": 19, + "d_id": 73272, + "documentation": { + "docstring": "Test that extremities are correctly calculated in the presence of\n soft failed events.\n\n Tests a graph like, where time flows down the page:\n\n A B\n / \\ /\n / \\ /\n SF1 SF2\n | |\n SF3 |\n / \\ |\n | \\ |\n C SF4\n\n Where SF* are soft failed, and with them A, B and C marked as\n extremities. 
This should resolve to B and C being marked as extremity.\n ", + "n_words": 71, + "vocab_size": 48, + "n_whitespaces": 241, + "language": "en" + } + }, + { + "id": 205061, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/db/backends/oracle/creation.py", + "file_name": "creation.py", + "fun_name": "_test_settings_get", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def _test_settings_get(self, key, default=None, prefixed=None):\n \n settings_dict = self.connection.settings_dict\n val = settings_dict[\"TEST\"].get(key, default)\n if val is None and prefixed:\n val = TEST_DATABASE_PREFIX + settings_dict[prefixed]\n return val\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 71, + "n_words": 25, + "vocab_size": 20, + "complexity": 3, + "nloc": 6, + "token_counts": 53, + "n_ast_nodes": 83, + "n_identifiers": 10, + "d_id": 51003, + "documentation": { + "docstring": "\n Return a value from the test settings dict, or a given default, or a\n prefixed entry from the main settings dict.\n ", + "n_words": 21, + "vocab_size": 15, + "n_whitespaces": 43, + "language": "en" + } + }, + { + "id": 219497, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/_collections_abc.py", + "file_name": "_collections_abc.py", + "fun_name": "throw", + "commit_message": "add python 3.10.4 for windows", + "code": "def throw(self, typ, val=None, tb=None):\n \n if val is None:\n if tb is None:\n raise typ\n val = typ()\n if tb is not None:\n val = val.with_traceback(tb)\n raise val\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 104, + "n_words": 28, + "vocab_size": 16, + "complexity": 4, + "nloc": 8, + "token_counts": 49, + "n_ast_nodes": 78, + "n_identifiers": 6, + "d_id": 55605, + "documentation": { + "docstring": "Raise an exception in the coroutine.\n Return next yielded value or raise StopIteration.\n ", + "n_words": 13, + "vocab_size": 13, + "n_whitespaces": 27, + "language": "en" + } + }, + { + "id": 125913, + "commit_id": "c4a259828b9cfbfb4f09059f74808893a6d20b76", + "repo": "ray", + "path": "python/ray/tests/kuberay/scripts/non_terminated_nodes_count.py", + "file_name": "non_terminated_nodes_count.py", + "fun_name": "count_non_terminated_nodes", + "commit_message": "[kuberay] Update KubeRay operator commit, turn autoscaler RPC drain back on (#27077)\n\nThis PR:\r\n\r\n- Updates the KubeRay operator commit used in the Ray CI autoscaling test\r\n- Uses the RayCluster autoscaling sample config from the KubeRay repo in place of of a config from the Ray repo\r\n- Turns the autoscaler RPC worker drain back on, as I saw some dead node messages from the GCS, and the RPC drain is supposed to avoid those.\r\n\r\nSigned-off-by: Dmitri Gekhtman ", + "code": "def count_non_terminated_nodes() -> int:\n \n provider_config = _generate_provider_config(ray_cluster_namespace=\"default\")\n kuberay_node_provider = _get_node_provider(\n provider_config=provider_config, cluster_name=\"raycluster-autoscaler\"\n )\n nodes = kuberay_node_provider.non_terminated_nodes({})\n return len(nodes)\n\n", + "url": "https://github.com/ray-project/ray.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 43, + "n_words": 18, + "vocab_size": 16, + 
"complexity": 1, + "nloc": 10, + "token_counts": 41, + "n_ast_nodes": 73, + "n_identifiers": 11, + "d_id": 28023, + "documentation": { + "docstring": "Get the count of non terminated nodes for the Ray cluster raycluster-autoscaler\n in namespace default.\n ", + "n_words": 15, + "vocab_size": 14, + "n_whitespaces": 21, + "language": "en" + } + }, + { + "id": 111439, + "commit_id": "8f1ba4de582c5e5282c022a7713a56b47302cabe", + "repo": "spaCy", + "path": "spacy/util.py", + "file_name": "util.py", + "fun_name": "all_equal", + "commit_message": "Backport parser/alignment optimizations from `feature/refactor-parser` (#10952)", + "code": "def all_equal(iterable):\n \n g = itertools.groupby(iterable)\n return next(g, True) and not next(g, False)\n", + "url": "https://github.com/explosion/spaCy.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 21, + "n_words": 12, + "vocab_size": 11, + "complexity": 2, + "nloc": 3, + "token_counts": 29, + "n_ast_nodes": 48, + "n_identifiers": 6, + "d_id": 24410, + "documentation": { + "docstring": "Return True if all the elements are equal to each other\n (or if the input is an empty sequence), False otherwise.", + "n_words": 21, + "vocab_size": 19, + "n_whitespaces": 23, + "language": "en" + } + }, + { + "id": 71277, + "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", + "repo": "wagtail", + "path": "wagtail/admin/templatetags/wagtailadmin_tags.py", + "file_name": "wagtailadmin_tags.py", + "fun_name": "pagination_querystring", + "commit_message": "Reformat with black", + "code": "def pagination_querystring(context, page_number, page_key=\"p\"):\n \n return querystring(context, **{page_key: page_number})\n\n\n@register.inclusion_tag(\n \"wagtailadmin/pages/listing/_pagination.html\", takes_context=True\n)", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "@register.inclusion_tag(\n \"wagtailadmin/pages/listing/_pagination.html\", takes_context=True\n)", + "n_ast_errors": 1, + "ast_levels": 10, + "n_whitespaces": 19, + "n_words": 12, + "vocab_size": 12, + "complexity": 1, + "nloc": 2, + "token_counts": 24, + "n_ast_nodes": 59, + "n_identifiers": 8, + "d_id": 15657, + "documentation": { + "docstring": "\n Print out a querystring with an updated page number:\n\n {% if page.has_next_page %}\n Next page\n {% endif %}\n ", + "n_words": 22, + "vocab_size": 20, + "n_whitespaces": 54, + "language": "en" + } + }, + { + "id": 261584, + "commit_id": "d8fa96c29828e3ca79ddd5d7466521ac4d95213c", + "repo": "scikit-learn", + "path": "sklearn/impute/tests/test_impute.py", + "file_name": "test_impute.py", + "fun_name": "test_knn_imputer_keep_empty_features", + "commit_message": "ENH keep features with all missing values during imputation (#24770)\n\nCo-authored-by: Chiara Marmo \r\nCo-authored-by: Julien Jerphanion \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>\r\nCo-authored-by: Vitor SRG \r\nFixes https://github.com/scikit-learn/scikit-learn/pull/16695\r\nFixes https://github.com/scikit-learn/scikit-learn/issues/16426\r\nFixes https://github.com/scikit-learn/scikit-learn/issues/16977", + "code": "def test_knn_imputer_keep_empty_features(keep_empty_features):\n \n X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]])\n\n imputer = KNNImputer(keep_empty_features=keep_empty_features)\n\n for method in [\"fit_transform\", \"transform\"]:\n X_imputed = getattr(imputer, method)(X)\n if keep_empty_features:\n assert X_imputed.shape == 
X.shape\n assert_array_equal(X_imputed[:, 1], 0)\n else:\n assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)\n\n", + "url": "https://github.com/scikit-learn/scikit-learn.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 105, + "n_words": 39, + "vocab_size": 33, + "complexity": 3, + "nloc": 10, + "token_counts": 110, + "n_ast_nodes": 168, + "n_identifiers": 13, + "d_id": 76878, + "documentation": { + "docstring": "Check the behaviour of `keep_empty_features` for `KNNImputer`.", + "n_words": 7, + "vocab_size": 7, + "n_whitespaces": 6, + "language": "en" + } + }, + { + "id": 241678, + "commit_id": "8a549a550cb10189ff1db382f546a40cd1c6c5b3", + "repo": "lightning", + "path": "pytorch_lightning/callbacks/progress/base.py", + "file_name": "base.py", + "fun_name": "test_batch_idx", + "commit_message": "Integrate progress tracking into the progress bar (#11213)", + "code": "def test_batch_idx(self) -> int:\n \n if self.trainer is None:\n return 0\n return self.trainer.test_loop.epoch_loop.batch_progress.current.processed\n", + "url": "https://github.com/Lightning-AI/lightning.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 44, + "n_words": 12, + "vocab_size": 11, + "complexity": 2, + "nloc": 8, + "token_counts": 31, + "n_ast_nodes": 51, + "n_identifiers": 9, + "d_id": 69648, + "documentation": { + "docstring": "The number of batches processed during testing.\n\n Use this to update your progress bar.\n ", + "n_words": 14, + "vocab_size": 14, + "n_whitespaces": 28, + "language": "en" + } + }, + { + "id": 112855, + "commit_id": "98c1a77f61900d486f46d284c49fb65675dbee6a", + "repo": "nni", + "path": "nni/algorithms/hpo/bohb_advisor/bohb_advisor.py", + "file_name": "bohb_advisor.py", + "fun_name": "_get_one_trial_job", + "commit_message": "Support multiple HPO experiments in one process (#4855)", + "code": "def _get_one_trial_job(self):\n \n if not self.generated_hyper_configs:\n ret = {\n 'parameter_id': '-1_0_0',\n 'parameter_source': 'algorithm',\n 'parameters': ''\n }\n self.send(CommandType.NoMoreTrialJobs, nni.dump(ret))\n return None\n assert self.generated_hyper_configs\n params = self.generated_hyper_configs.pop(0)\n ret = {\n 'parameter_id': params[0],\n 'parameter_source': 'algorithm',\n 'parameters': params[1]\n }\n self.parameters[params[0]] = params[1]\n return ret\n", + "url": "https://github.com/microsoft/nni.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 217, + "n_words": 39, + "vocab_size": 26, + "complexity": 2, + "nloc": 18, + "token_counts": 95, + "n_ast_nodes": 164, + "n_identifiers": 12, + "d_id": 24770, + "documentation": { + "docstring": "get one trial job, i.e., one hyperparameter configuration.\n\n If this function is called, Command will be sent by BOHB:\n a. If there is a parameter need to run, will return \"NewTrialJob\" with a dict:\n {\n 'parameter_id': id of new hyperparameter\n 'parameter_source': 'algorithm'\n 'parameters': value of new hyperparameter\n }\n b. 
If BOHB don't have parameter waiting, will return \"NoMoreTrialJobs\" with\n {\n 'parameter_id': '-1_0_0',\n 'parameter_source': 'algorithm',\n 'parameters': ''\n }\n ", + "n_words": 67, + "vocab_size": 48, + "n_whitespaces": 189, + "language": "en" + } + }, + { + "id": 77976, + "commit_id": "b8a9a2d319b06fc2318d68d05b5a6cdf85b5b33d", + "repo": "wagtail", + "path": "wagtail/contrib/modeladmin/options.py", + "file_name": "options.py", + "fun_name": "get_menu_item", + "commit_message": "Deprecate wagtail.contrib.modeladmin.menus.SubMenu in favour of wagtail.admin.menu.Menu\n\nThe Menu class was not originally designed to accept menu items at constructor time (instead requiring them to be passed via hooks); ModelAdmin's SubMenu class patched this functionality in, and the documentation for extending admin views piggybacked on this. Add this functionality to the base Menu class so that we don't have this unnecessary dependency on ModelAdmin.", + "code": "def get_menu_item(self, order=None):\n \n return ModelAdminMenuItem(self, order or self.get_menu_order())\n", + "url": "https://github.com/wagtail/wagtail.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 10, + "n_whitespaces": 22, + "n_words": 8, + "vocab_size": 8, + "complexity": 2, + "nloc": 2, + "token_counts": 23, + "n_ast_nodes": 38, + "n_identifiers": 5, + "d_id": 16727, + "documentation": { + "docstring": "\n Utilised by Wagtail's 'register_menu_item' hook to create a menu item\n to access the listing view, or can be called by ModelAdminGroup\n to create a submenu\n ", + "n_words": 25, + "vocab_size": 20, + "n_whitespaces": 54, + "language": "en" + } + }, + { + "id": 247885, + "commit_id": "33ebee47e4e96a2b6fdf72091769e59034dc550f", + "repo": "synapse", + "path": "tests/unittest.py", + "file_name": "unittest.py", + "fun_name": "get_failure", + "commit_message": "Remove redundant `get_success` calls in test code (#12346)\n\nThere are a bunch of places we call get_success on an immediate value, which is unnecessary. Let's rip them out, and remove the redundant functionality in get_success and friends.", + "code": "def get_failure(self, d, exc):\n \n deferred: Deferred[Any] = ensureDeferred(d)\n self.pump()\n return self.failureResultOf(deferred, exc)\n", + "url": "https://github.com/matrix-org/synapse.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 40, + "n_words": 12, + "vocab_size": 12, + "complexity": 1, + "nloc": 4, + "token_counts": 35, + "n_ast_nodes": 56, + "n_identifiers": 10, + "d_id": 71975, + "documentation": { + "docstring": "\n Run a Deferred and get a Failure from it. 
The failure must be of the type `exc`.\n ", + "n_words": 17, + "vocab_size": 16, + "n_whitespaces": 32, + "language": "en" + } + }, + { + "id": 265175, + "commit_id": "a38a880e67d78eba52f19cc4c2613e9399939c2f", + "repo": "netbox", + "path": "netbox/utilities/request.py", + "file_name": "request.py", + "fun_name": "get_client_ip", + "commit_message": "Refactor source IP resolution logic", + "code": "def get_client_ip(request, additional_headers=()):\n \n HTTP_HEADERS = (\n 'HTTP_X_REAL_IP',\n 'HTTP_X_FORWARDED_FOR',\n 'REMOTE_ADDR',\n *additional_headers\n )\n for header in HTTP_HEADERS:\n if header in request.META:\n client_ip = request.META[header].split(',')[0]\n try:\n return ipaddress.ip_address(client_ip)\n except ValueError:\n raise ValueError(f\"Invalid IP address set for {header}: {client_ip}\")\n\n # Could not determine the client IP address from request headers\n return None\n", + "url": "https://github.com/netbox-community/netbox.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 16, + "n_whitespaces": 164, + "n_words": 48, + "vocab_size": 41, + "complexity": 4, + "nloc": 18, + "token_counts": 71, + "n_ast_nodes": 128, + "n_identifiers": 11, + "d_id": 78016, + "documentation": { + "docstring": "\n Return the client (source) IP address of the given request.\n ", + "n_words": 10, + "vocab_size": 9, + "n_whitespaces": 17, + "language": "en" + } + }, + { + "id": 101210, + "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", + "repo": "faceswap", + "path": "lib/align/alignments.py", + "file_name": "alignments.py", + "fun_name": "save_video_meta_data", + "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", + "code": "def save_video_meta_data(self, pts_time, keyframes):\n \n if pts_time[0] != 0:\n pts_time, keyframes = self._pad_leading_frames(pts_time, keyframes)\n\n sample_filename = next(fname for fname in self.data)\n basename = sample_filename[:sample_filename.rfind(\"_\")]\n logger.debug(\"sample filename: %s, base filename: %s\", sample_filename, basename)\n logger.info(\"Saving video meta information to Alignments file\")\n\n for idx, pts in enumerate(pts_time):\n meta = dict(pts_time=pts, keyframe=idx in keyframes)\n key = f\"{basename}_{idx + 1:06d}.png\"\n if key not in self.data:\n self.data[key] = dict(video_meta=meta, faces=[])\n else:\n self.data[key][\"video_meta\"] = meta\n\n logger.debug(\"Alignments count: %s, timestamp count: %s\", len(self.data), len(pts_time))\n if len(self.data) != len(pts_time):\n raise FaceswapError(\n \"There is a mismatch between the number of frames found in the video file \"\n f\"({len(pts_time)}) and the number of frames found in the alignments file \"\n f\"({len(self.data)}).\\nThis can be caused by a number of issues:\"\n \"\\n - The video has a Variable Frame Rate and FFMPEG is having a hard time \"\n \"calculating the correct number of frames.\"\n \"\\n - You are working with a Merged Alignments file. 
This is not supported for \"\n \"your current use case.\"\n \"\\nYou should either extract the video to individual frames, re-encode the \"\n \"video at a constant frame rate and re-run extraction or work with a dedicated \"\n \"alignments file for your requested video.\")\n self.save()\n", + "url": "https://github.com/deepfakes/faceswap.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 15, + "n_whitespaces": 512, + "n_words": 194, + "vocab_size": 128, + "complexity": 6, + "nloc": 28, + "token_counts": 189, + "n_ast_nodes": 353, + "n_identifiers": 26, + "d_id": 20631, + "documentation": { + "docstring": " Save video meta data to the alignments file.\n\n If the alignments file does not have an entry for every frame (e.g. if Extract Every N\n was used) then the frame is added to the alignments file with no faces, so that they video\n meta data can be stored.\n\n Parameters\n ----------\n pts_time: list\n A list of presentation timestamps (`float`) in frame index order for every frame in\n the input video\n keyframes: list\n A list of frame indices corresponding to the key frames in the input video\n ", + "n_words": 85, + "vocab_size": 55, + "n_whitespaces": 175, + "language": "en" + } + }, + { + "id": 42483, + "commit_id": "692adaff901dd9daf29400fdf3385130aefbfb2a", + "repo": "nltk", + "path": "nltk/util.py", + "file_name": "util.py", + "fun_name": "unweighted_minimum_spanning_digraph", + "commit_message": "Fix some tests in Wordnet-related DocStrings", + "code": "def unweighted_minimum_spanning_digraph(tree, children=iter, shapes=None, attr=None):\n \n return edges2dot(\n edge_closure(\n tree, lambda node: unweighted_minimum_spanning_dict(tree, children)[node]\n ),\n shapes,\n attr,\n )\n\n\n##########################################################################\n# Breadth-First / Depth-first Searches with Cycle Detection\n##########################################################################\n\n", + "url": "https://github.com/nltk/nltk.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 13, + "n_whitespaces": 72, + "n_words": 27, + "vocab_size": 26, + "complexity": 1, + "nloc": 8, + "token_counts": 44, + "n_ast_nodes": 67, + "n_identifiers": 10, + "d_id": 7568, + "documentation": { + "docstring": "\n\n Build a Minimum Spanning Tree (MST) of an unweighted graph,\n by traversing the nodes of a tree in breadth-first order,\n discarding eventual cycles.\n\n Return a representation of this MST as a string in the DOT graph language,\n which can be converted to an image by the 'dot' program from the Graphviz\n package, or nltk.parse.dependencygraph.dot2img(dot_string).\n\n The first argument should be the tree root;\n children should be a function taking as argument a tree node\n and returning an iterator of the node's children.\n\n >>> import nltk\n >>> wn=nltk.corpus.wordnet\n >>> from nltk.util import unweighted_minimum_spanning_digraph as umsd\n >>> print(umsd(wn.synset('bound.a.01'), lambda s:s.also_sees()))\n digraph G {\n \"Synset('bound.a.01')\" -> \"Synset('unfree.a.02')\";\n \"Synset('unfree.a.02')\" -> \"Synset('confined.a.02')\";\n \"Synset('unfree.a.02')\" -> \"Synset('dependent.a.01')\";\n \"Synset('unfree.a.02')\" -> \"Synset('restricted.a.01')\";\n \"Synset('restricted.a.01')\" -> \"Synset('classified.a.02')\";\n }\n \n ", + "n_words": 117, + "vocab_size": 81, + "n_whitespaces": 184, + "language": "en" + } + }, + { + "id": 20942, + "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", + "repo": 
"pipenv", + "path": "pipenv/patched/notpip/_vendor/typing_extensions.py", + "file_name": "typing_extensions.py", + "fun_name": "_gorg", + "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", + "code": "def _gorg(cls):\n \n assert isinstance(cls, GenericMeta)\n if hasattr(cls, '_gorg'):\n return cls._gorg\n while cls.__origin__ is not None:\n cls = cls.__origin__\n return cls\n\n\n_PROTO_WHITELIST = ['Callable', 'Awaitable',\n 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',\n 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',\n 'ContextManager', 'AsyncContextManager']\n\n", + "url": "https://github.com/pypa/pipenv.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 120, + "n_words": 35, + "vocab_size": 31, + "complexity": 3, + "nloc": 7, + "token_counts": 40, + "n_ast_nodes": 124, + "n_identifiers": 7, + "d_id": 3622, + "documentation": { + "docstring": "This function exists for compatibility with old typing versions.", + "n_words": 9, + "vocab_size": 9, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 42476, + "commit_id": "75f4e2183a80904dd3a6f958072ae2d063b51fad", + "repo": "nltk", + "path": "nltk/corpus/reader/wordnet.py", + "file_name": "wordnet.py", + "fun_name": "lemmas", + "commit_message": "Handle wordnet synsets that were lost in mapping", + "code": "def lemmas(self, lang=\"eng\"):\n \n if lang == \"eng\":\n return self._lemmas\n elif self._name:\n self._wordnet_corpus_reader._load_lang_data(lang)\n lemmark = []\n lemmy = self.lemma_names(lang)\n for lem in lemmy:\n temp = Lemma(\n self._wordnet_corpus_reader,\n self,\n lem,\n self._wordnet_corpus_reader._lexnames.index(self.lexname()),\n 0,\n None,\n )\n temp._lang = lang\n lemmark.append(temp)\n return lemmark\n", + "url": "https://github.com/nltk/nltk.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 17, + "n_whitespaces": 299, + "n_words": 38, + "vocab_size": 32, + "complexity": 4, + "nloc": 19, + "token_counts": 94, + "n_ast_nodes": 150, + "n_identifiers": 18, + "d_id": 7561, + "documentation": { + "docstring": "Return all the lemma objects associated with the synset", + "n_words": 9, + "vocab_size": 8, + "n_whitespaces": 8, + "language": "en" + } + }, + { + "id": 337588, + "commit_id": "23c0341262bd396a3ba9265614b3818d6e08a6c1", + "repo": "accelerate", + "path": "tests/test_examples.py", + "file_name": "test_examples.py", + "fun_name": "test_cross_validation", + "commit_message": "Refactor tests to use accelerate launch (#373)\n\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", + "code": "def test_cross_validation(self):\n testargs = .split()\n with mock.patch.dict(os.environ, {\"TESTING_MOCKED_DATALOADERS\": \"0\"}):\n output = subprocess.run(\n self._launch_args + testargs, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n ).stdout\n results = ast.literal_eval(re.findall(\"({.+})\", 
output)[-1])\n self.assertGreaterEqual(results[\"accuracy\"], 0.75)\n", + "url": "https://github.com/huggingface/accelerate.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 14, + "n_whitespaces": 97, + "n_words": 25, + "vocab_size": 23, + "complexity": 1, + "nloc": 11, + "token_counts": 92, + "n_ast_nodes": 148, + "n_identifiers": 23, + "d_id": 121085, + "documentation": { + "docstring": "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ", + "n_words": 3, + "vocab_size": 3, + "n_whitespaces": 25, + "language": "en" + } + }, + { + "id": 217736, + "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", + "repo": "XX-Net", + "path": "python3.10.4/Lib/http/client.py", + "file_name": "client.py", + "fun_name": "parse_headers", + "commit_message": "add python 3.10.4 for windows", + "code": "def parse_headers(fp, _class=HTTPMessage):\n \n headers = _read_headers(fp)\n hstring = b''.join(headers).decode('iso-8859-1')\n return email.parser.Parser(_class=_class).parsestr(hstring)\n\n", + "url": "https://github.com/XX-net/XX-Net.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 23, + "n_words": 11, + "vocab_size": 10, + "complexity": 1, + "nloc": 4, + "token_counts": 46, + "n_ast_nodes": 78, + "n_identifiers": 13, + "d_id": 54906, + "documentation": { + "docstring": "Parses only RFC2822 headers from a file pointer.\n\n email Parser wants to see strings rather than bytes.\n But a TextIOWrapper around self.rfile would buffer too many bytes\n from the stream, bytes which we later need to read as bytes.\n So we read the correct bytes here, as bytes, for email Parser\n to parse.\n\n ", + "n_words": 53, + "vocab_size": 40, + "n_whitespaces": 71, + "language": "en" + } + }, + { + "id": 266521, + "commit_id": "fee90b15a25b588bfb8a9ff047e851d43e78511f", + "repo": "ansible", + "path": "lib/ansible/module_utils/common/file.py", + "file_name": "file.py", + "fun_name": "is_executable", + "commit_message": "Misc typo fixes in module_utils (#76564)", + "code": "def is_executable(path):\n # This function's signature needs to be repeated\n # as the first line of its docstring.\n # This method is reused by the basic module,\n # the repetition helps the basic module's html documentation come out right.\n # http://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_docstring_signature\n \n # These are all bitfields so first bitwise-or all the permissions we're\n # looking for, then bitwise-and with the file's mode to determine if any\n # execute bits are set.\n return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])\n\n", + "url": "https://github.com/ansible/ansible.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 109, + "n_words": 79, + "vocab_size": 60, + "complexity": 1, + "nloc": 2, + "token_counts": 34, + "n_ast_nodes": 63, + "n_identifiers": 8, + "d_id": 78454, + "documentation": { + "docstring": "is_executable(path)\n\n is the given path executable?\n\n :arg path: The path of the file to check.\n\n Limitations:\n\n * Does not account for FSACLs.\n * Most times we really want to know \"Can the current user execute this\n file\". 
This function does not tell us that, only if any execute bit is set.\n ", + "n_words": 51, + "vocab_size": 43, + "n_whitespaces": 75, + "language": "en" + } + }, + { + "id": 31755, + "commit_id": "6c8f4c9a938a09749ea1b19a5fa2a8dd27e99a29", + "repo": "transformers", + "path": "src/transformers/models/groupvit/modeling_groupvit.py", + "file_name": "modeling_groupvit.py", + "fun_name": "project_group_token", + "commit_message": "Adding GroupViT Models (#17313)\n\n* add group vit and fixed test (except slow)\r\n\r\n* passing slow test\r\n\r\n* addressed some comments\r\n\r\n* fixed test\r\n\r\n* fixed style\r\n\r\n* fixed copy\r\n\r\n* fixed segmentation output\r\n\r\n* fixed test\r\n\r\n* fixed relative path\r\n\r\n* fixed copy\r\n\r\n* add ignore non auto configured\r\n\r\n* fixed docstring, add doc\r\n\r\n* fixed copies\r\n\r\n* Apply suggestions from code review\r\n\r\nmerge suggestions\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* resolve comment, renaming model\r\n\r\n* delete unused attr\r\n\r\n* use fix copies\r\n\r\n* resolve comments\r\n\r\n* fixed attn\r\n\r\n* remove unused vars\r\n\r\n* refactor tests\r\n\r\n* resolve final comments\r\n\r\n* add demo notebook\r\n\r\n* fixed inconsitent default\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* rename stage->stages\r\n\r\n* Create single GroupViTEncoderLayer class\r\n\r\n* Update conversion script\r\n\r\n* Simplify conversion script\r\n\r\n* Remove cross-attention class in favor of GroupViTAttention\r\n\r\n* Convert other model as well, add processor to conversion script\r\n\r\n* addressing final comment\r\n\r\n* fixed args\r\n\r\n* Update src/transformers/models/groupvit/modeling_groupvit.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Niels Rogge ", + "code": "def project_group_token(self, group_tokens):\n \n # [B, num_output_groups, C] <- [B, num_group_tokens, C]\n projected_group_tokens = self.mlp_inter(group_tokens)\n projected_group_tokens = self.norm_post_tokens(projected_group_tokens)\n return projected_group_tokens\n", + "url": "https://github.com/huggingface/transformers.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 8, + "n_whitespaces": 54, + "n_words": 19, + "vocab_size": 14, + "complexity": 1, + "nloc": 4, + "token_counts": 26, + "n_ast_nodes": 45, + "n_identifiers": 6, + "d_id": 5800, + "documentation": { + "docstring": "\n Args:\n group_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels]\n\n Returns:\n projected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels]\n ", + "n_words": 14, + "vocab_size": 11, + "n_whitespaces": 58, + "language": "en" + } + }, + { + "id": 292690, + "commit_id": "cfd763db40544c31077b46631bbdd9655581dfe9", + "repo": "core", + "path": "homeassistant/components/sonos/media.py", + "file_name": "media.py", + "fun_name": "write_media_player_states", + "commit_message": "Refactor Sonos media metadata handling (#66840)\n\nCo-authored-by: Paulus Schoutsen ", + "code": "def 
write_media_player_states(self) -> None:\n \n dispatcher_send(self.hass, SONOS_MEDIA_UPDATED, self.soco.uid)\n", + "url": "https://github.com/home-assistant/core.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 9, + "n_whitespaces": 21, + "n_words": 7, + "vocab_size": 7, + "complexity": 1, + "nloc": 3, + "token_counts": 22, + "n_ast_nodes": 36, + "n_identifiers": 7, + "d_id": 91764, + "documentation": { + "docstring": "Send a signal to media player(s) to write new states.", + "n_words": 10, + "vocab_size": 9, + "n_whitespaces": 9, + "language": "en" + } + }, + { + "id": 203524, + "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", + "repo": "django", + "path": "django/contrib/admin/tests.py", + "file_name": "tests.py", + "fun_name": "select_option", + "commit_message": "Refs #33476 -- Reformatted code with Black.", + "code": "def select_option(self, selector, value):\n \n from selenium.webdriver.common.by import By\n from selenium.webdriver.support.ui import Select\n\n select = Select(self.selenium.find_element(By.CSS_SELECTOR, selector))\n select.select_by_value(value)\n", + "url": "https://github.com/django/django.git", + "language": "Python", + "ast_errors": "", + "n_ast_errors": 0, + "ast_levels": 11, + "n_whitespaces": 52, + "n_words": 17, + "vocab_size": 15, + "complexity": 1, + "nloc": 5, + "token_counts": 53, + "n_ast_nodes": 79, + "n_identifiers": 16, + "d_id": 50427, + "documentation": { + "docstring": "\n Select the